sahara-16.0.0/.coveragerc:

[run]
branch = True
source = sahara
omit = .tox/*
    sahara/tests/*

[paths]
source = sahara

[report]
ignore_errors = True
precision = 3

sahara-16.0.0/.stestr.conf:

[DEFAULT]
test_path=./sahara/tests/unit
top_dir=./
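These two files drive the unit test and coverage setup: stestr discovers tests under ./sahara/tests/unit, and coverage.py measures only the sahara package while skipping .tox/* and the test tree itself. Below is a minimal sketch of a test module that this configuration would pick up; the file name, class, and assertion are illustrative only and are not taken from the repository (real Sahara unit tests typically build on oslotest/testtools base classes).

# sahara/tests/unit/test_example.py  (hypothetical path, matching test_path above)
import unittest


class ExampleTest(unittest.TestCase):
    """Stand-in test case: anything unittest-compatible found under
    ./sahara/tests/unit is discovered and run by stestr."""

    def test_lowercase_name(self):
        # Trivial, self-contained assertion so the module runs on its own.
        self.assertEqual("cluster-1", "Cluster-1".lower())


if __name__ == "__main__":
    unittest.main()

From a source checkout, "stestr run" (or the repository's tox environments) would typically execute such a module, and coverage.py would then report only lines inside the sahara package, per the [run] section above.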
sahara-16.0.0/.zuul.yaml:

- project:
    templates:
      - openstack-lower-constraints-jobs
      - openstack-python3-wallaby-jobs
      - periodic-stable-jobs
      - publish-openstack-docs-pti
      - check-requirements
      - release-notes-jobs-python3
    check:
      jobs:
        - openstack-tox-pylint:
            voting: false
        - sahara-tests-scenario:
            voting: false
        - sahara-tests-scenario-v2:
            voting: false
        - sahara-tests-tempest
        - sahara-tests-tempest-v2
        - openstack-tox-cover:
            voting: false
        - sahara-grenade:
            voting: false
        - sahara-openstack-ansible-functional:
            voting: false
    gate:
      queue: sahara
      jobs:
        - sahara-tests-scenario:
            voting: false
        - sahara-tests-scenario-v2:
            voting: false
        - sahara-tests-tempest
        - sahara-tests-tempest-v2
        # - sahara-grenade
        - sahara-openstack-ansible-functional:
            voting: false
    experimental:
      jobs:
        - sahara-buildimages-ambari
        - sahara-buildimages-cloudera
        - sahara-buildimages-mapr
        - sahara-buildimages-spark
        - sahara-tests-scenario-multinode-spark

- job:
    name: sahara-grenade
    parent: grenade
    required-projects:
      - opendev.org/openstack/grenade
      - opendev.org/openstack/sahara
      - opendev.org/openstack/python-saharaclient
      - opendev.org/openstack/heat
      - opendev.org/openstack/heat-tempest-plugin
      - opendev.org/openstack/python-heatclient
      - opendev.org/openstack/sahara-tests
      - opendev.org/openstack/sahara-plugin-ambari
      - opendev.org/openstack/sahara-plugin-cdh
      - opendev.org/openstack/sahara-plugin-mapr
      - opendev.org/openstack/sahara-plugin-spark
      - opendev.org/openstack/sahara-plugin-storm
      - opendev.org/openstack/sahara-plugin-vanilla
    vars:
      grenade_localrc:
        RUN_HEAT_INTEGRATION_TESTS: False
      grenade_devstack_localrc:
        shared:
          IMAGE_URLS: http://tarballs.openstack.org/heat-test-image/fedora-heat-test-image.qcow2,https://cloud-images.ubuntu.com/xenial/current/xenial-server-cloudimg-amd64-disk1.img
      devstack_local_conf:
        test-config:
          $TEMPEST_CONFIG:
            data_processing:
              test_image_name: xenial-server-cloudimg-amd64-disk1
              test_ssh_user: ubuntu
            data-processing-feature-enabled:
              s3: True
      devstack_plugins:
        sahara: https://opendev.org/openstack/sahara
        heat: https://opendev.org/openstack/heat
      devstack_services:
        h-api: true
        h-api-cfn: true
        h-eng: true
        heat: true
        tls-proxy: false
      tempest_plugins:
        - sahara-tests
        - heat-tempest-plugin
      tempest_test_regex: ^(sahara_tempest_plugin.tests.)
      tox_envlist: all
    group-vars:
      subnode:
        devstack_services:
          tls-proxy: false
    irrelevant-files:
      - ^(test-|)requirements.txt$
      - ^.*\.rst$
      - ^doc/.*$
      - ^sahara/locale/.*$
      - ^sahara/tests/unit/.*$
      - ^releasenotes/.*$
      - ^tools/.*$
      - ^tox.ini$

- job:
    name: sahara-tripleo-ci-centos-7-scenario003-standalone
    description: |
      Basic TripleO standalone job with all Sahara API tests
    parent: tripleo-ci-centos-7-scenario003-standalone
    vars:
      featureset_override:
        tempest_test_whitelist:
          - 'sahara_tempest_plugin.tests.api.'

- job:
    name: sahara-openstack-ansible-functional
    description: |
      OSA job focused on openstack-ansible-os_sahara
    parent: openstack-ansible-cross-repo-functional
    required-projects:
      - openstack/openstack-ansible-os_sahara
    vars:
      osa_test_repo: openstack/openstack-ansible-os_sahara
      devstack_localrc:
        USE_PYTHON3: True

- job:
    name: sahara-buildimages-base
    nodeset: centos-8
    vars:
      sahara_src_dir: src/opendev.org/openstack/sahara
    run: playbooks/buildimages/run.yaml
    timeout: 7200
    required-projects:
      - openstack/sahara
      - openstack/sahara-plugin-ambari
      - openstack/sahara-plugin-cdh
      - openstack/sahara-plugin-mapr
      - openstack/sahara-plugin-spark
      - openstack/sahara-plugin-storm
      - openstack/sahara-plugin-vanilla

- job:
    name: sahara-buildimages-ambari
    parent: sahara-buildimages-base
    vars:
      sahara_plugin: ambari

- job:
    name: sahara-buildimages-cloudera
    parent: sahara-buildimages-base
    vars:
      sahara_plugin: cdh

- job:
    name: sahara-buildimages-mapr
    parent: sahara-buildimages-base
    vars:
      sahara_plugin: mapr

- job:
    name: sahara-buildimages-spark
    parent: sahara-buildimages-base
    vars:
      sahara_plugin: spark
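In the sahara-grenade job above, Zuul evaluates each irrelevant-files entry as a regular expression against the paths touched by a change and skips the job when every changed file matches one of the patterns. The following is a rough Python illustration of that matching logic for reading the list; it is an approximation written for this dump, not Zuul's actual implementation.

import re

# The irrelevant-files patterns from the sahara-grenade job above.
IRRELEVANT = [
    r"^(test-|)requirements.txt$",
    r"^.*\.rst$",
    r"^doc/.*$",
    r"^sahara/locale/.*$",
    r"^sahara/tests/unit/.*$",
    r"^releasenotes/.*$",
    r"^tools/.*$",
    r"^tox.ini$",
]


def job_would_run(changed_files):
    # Rough approximation: the job is skipped only when every changed file
    # matches at least one "irrelevant" pattern.
    return any(
        not any(re.match(pattern, path) for pattern in IRRELEVANT)
        for path in changed_files
    )


# A docs-only change would not trigger the job; a service-code change would.
print(job_would_run(["doc/source/index.rst", "releasenotes/notes/x.yaml"]))  # False
print(job_would_run(["sahara/service/api/v2/clusters.py"]))                  # True

So a docs-only or release-notes-only change leaves the grenade job untriggered, while a change to the service code runs it.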
sahara-16.0.0/AUTHORS:

Abbass Marouni Abhishek Chanda Adrien Vergé Akanksha Agrawal Alberto Planas
Alexander Aleksiyants Alexander Ignatov Alexander Kuznetsov Alexandra Settle
Alina Nesterova Alok Jani Andreas Jaeger Andreas Jaeger Andrew Lazarev
Andrey Pavlov Anh Tran Anusree ArchiFleKs Artem Osadchiy Artem Osadchyi
Ashish Billore Atsushi SAKAI Bass T Bo Wang Bob Nettleton Brandon James
Cao Xuan Hoang Chad Roberts Chandan Kumar Chang Bo Guo ChangBo Guo(gcb)
Charles Short Chris Buccella Chris Buccella Christian Berendt Christian Berendt
Colleen Murphy Corey Bryant Daniel Gonzalez Daniele Venzano Dao Cong Tien
Davanum Srinivas Deliang Fan Demid Dementev Denis Egorenko DennyZhang
Dexter Fryar Dina Belova Dirk Mueller Dmitry Mescheryakov Dong Ma
Doug Hellmann Duan Jiong Elise Gafford Elod Illes Emilien Macchi Eohyung Lee
Erik Bergenholtz Ernst Sjöstrand Ethan Gafford Evgeny Sikachev Fang Jinxing
Fengqian Gao Flavio Percoco Francesco Vollero Francois Deppierraz
Ghanshyam Mann Graham Hayes Grigoriy Roghkov Grigoriy Rozhkov Guo Shan
Gyorgy Szombathelyi Gökhan IŞIK Ha Van Tu Hareesh Puthalath He Yongli
Hervé Beraud Hironori Shiina Hongbin Lu Hui HX Xiang Ian Wienand
Ihar Hrachyshka Ilya Tyaptin Ivoline Ngong Iwona Kotlarska Jacob Bin Wang
James E. Blair Jamie Lennox Javeme Javier Pena Jaxon Wang Jeremy Freudberg
Jeremy Liu Jeremy Stanley Jesse Pretorius JiHyunSong Jinay Vora Jinxing Fang
Joe Gordon John Garbutt John Speidel Jon Maron Jonathan Halterman
Jonathan Jozwiak Joseph D Natoli JuPing Julian Sy Julien Danjou
Kazuki OIKAWA Kazuki Oikawa Kazuki Oikawa Ken Chen Kevin Vasko
Khanh-Toan Tran Konovalov-Nik Lawrence Davison Li, Chen LiuNanke
Luigi Toscano Lujin Luo Luong Anh Tuan M V P Nitesh Manishanker Talusani
Marc Solanas Maria Malyarova Marianne Linhares Marianne Linhares Monteiro
Markus Zoeller Martin Kletzander Mate Lakat Matthew Edmonds Matthew Farrellee
Matthew Treinish Maxence Dalmais Michael Ionkin Michael Krotscheck
Michael Lelyakin Michael McCune Michael McCune Mikhail Mikhail Lelyakin
Mimansa Mohammed Naser Monty Taylor Nadya Privalova Nam Nguyen Hoai
Ngo Quoc Cuong Nguyen Hai Nguyen Hai Truong Nguyen Hung Phuong
Nicolas Haller Nicolas Haller Nikita Konovalov Nikolay Mahotkin
Nikolay Starodubtsev Nirmal Ranganathan Oleg Borisenko Ondřej Nový
OpenStack Release Bot PanFengyun Patrick Amor PavlovAndrey Pedro Navarro
Pierre Padrixe Pritesh Kothari Rafik Renat Akhmerov Rich Bowen Robert Levas
Ronald Bradford Ruslan Kamaldinov Sarvesh Ranjan Sean Dague Sean McGinnis
Sergey Gotliv Sergey Lukjanov Sergey Lukjanov Sergey Lukjanov
Sergey Reshetnyak Sergey Reshetnyak Sergey Vilgelm Shail Bhargava
Sharan Kumar Monikanta Rajan Shilla Saebi Shu Yingya Shuquan Huang
Sofiia Kostiuchenko Steve Kowalik SunAnChen Susanne Balle Takashi Kajinami
Tang Chen Telles Nobrega Telles Nobrega Telles Nobrega Tetiana Lashchova
Thierry Carrez Thomas Bechtold Thomas Goirand Tim Kelsey Tim Millican
Tin Lam Tingting Bao Travis McPeak Trevor McKay Vadim Rovachev
Velmurugan Kumar Venkateswarlu Pallamala Victor Sergeyev Vinod Pandarinathan
Vitaliy Levitski Vitaly Gridnev Vitaly Gridnev William Stevenson Xi Yang
XiaBing Yao Xinyuan Huang Yaroslav Lobankov Yuanbin.Chen Zhao Lei
Zhenguo Niu ZhiQiang Fan ZhongShengping Zhongyue Luo Zhuang Changkun
akhiljain23 artemosadchiy bhujay caowei caoyue chao liu
chenpengzi <1523688226@qq.com> chenxing deepakmourya dmitryme gaobin
gecong1973 ghanshyam groghkov hejunli <18438695326@163.com> huang.zhiping
iberezovskiy inspurericzhang jacobliberman jeremyfreudberg jiasen.lin
kangyufei kavithahr kk kk lcsong lei-zhang-99cloud leiyashuai liuqing
llg8212 lu huichun luhuichun maaoyu makocchi mandar mathspanda melissaml
msionkin nizam npraveen35 pangliye pawnesh.kumar pengyuesheng pratik-gadiya
qiaomin qiufossen ricolin ruifaling shangxiaobj sharat.sharma singinforest
skostiuchenkoHDP songwenping sun cheng sunyandi sven mark svenmark tanlin
taoguo venkatamahesh vrovachev wangdequn wanghuagong wangqi weiting-chen
wu.chunyang wu.chunyang wuchunyang xiexs xuhaigang yangxurong yangyapeng
yangyong yaseminti yatin yingya.shu yrunts yrunts zhang.lei zhangbailin
zhanghongtao zhangxuanyuan zhangyanxian zhaoleilc <15247232416@163.com>
zhaorenming zhouyunfeng zhufl zhuli zhulingjie
“leiyashuai” <“leiyashuai@inspur.com”>
sahara-16.0.0/CONTRIBUTING.rst:

The source repository for this project can be found at:

    https://opendev.org/openstack/sahara

Pull requests submitted through GitHub are not monitored.

To start contributing to OpenStack, follow the steps in the contribution guide
to set up and use Gerrit:

    https://docs.openstack.org/contributors/code-and-documentation/quick-start.html

Bugs should be filed on Storyboard:

    https://storyboard.openstack.org/#!/project/openstack/sahara

For more specific information about contributing to this repository, see the
sahara contributor guide:

    https://docs.openstack.org/sahara/latest/contributor/contributing.html

sahara-16.0.0/ChangeLog:

CHANGES
=======

16.0.0
------

* Update TOX\_CONSTRAINTS\_FILE for stable/yoga
* Update .gitreview for stable/yoga
* Remove reference to deprecated RequestContext.user
* Fix compatibility with oslo.context >= 4.0.0

16.0.0.0rc1
-----------

* Solve the problem that the code in the xml file is not clear
* Let the code in the json file be displayed
* Make hidden code appear in admin guide
* Remove the suffix of \*console\*
* Make unclear code clear in user guide
* Remove the suffix of \*console\*
* Remove the console suffix
* using code-block match console section to show the content
* code-block replaces sourcecode to clearly display the content
* code-block replaces the soucecode in install docs section
* change the code-block header of the docs
* Remove unicode from code
* Remove unicode from code in release note conf
* Remove unicode from code in api-ref conf
* Remove unicode from code in doc conf
* Substring matching image's name
* Update master for stable/xena

15.0.0
------

* Remove six
* Enable healthcheck middleware by default
* Preinstall requirements for docs build
* Drop support for Block Storage API v2
* docs: Update Freenode to OFTC
* health check error about SqlAlchemy
* delete validation error cluster
* setup.cfg: Replace dashes with underscores
* [goal] Deprecate the JSON formatted policy file
* Update master for stable/wallaby

14.0.0
------

* Fix lower requirements: bump various packages
* gate fix: no removed tail\_log in devstack, disable grenade
* Fix the isolated exception message
* Replace deprecated UPPER\_CONSTRAINTS\_FILE variable
* Add Python3 wallaby unit tests
* Update master for stable/victoria

13.0.0
------

* Remove the unused coding style modules
* Focal-related fixes: libguestfs, mysql 8, requirements
* Correct some typos in some strings
* zuul: remove tripleo jobs
* Switch from unittest2 compat methods to Python 3.x methods
* Remove translation sections from setup.cfg
* drop mock from lower-constraints
* Stop to use the \_\_future\_\_ module
* Cap jsonschema 3.2.0 as the minimal version
* Fix duplicated words issue like "is is provided to"
* Fix hacking min version to 3.0.1
* Monkey patch original current\_thread \_active
* Imported Translations from Zanata
* Add py38 package metadata
* Use flask's builtin methods for test context
* Imported Translations from Zanata
* Bump default tox env from py37 to py38
* Add Python3 victoria unit tests
* Update master for stable/ussuri

12.0.0.0rc1
-----------

* Ussuri contributor docs community goal
* Native zuul v3 grenade job, cleanup -py3
* Update hacking for Python3
* Use unittest.mock instead of third party mock
* Cleanup Python 2.7 support
* (Temporarily) skip TestVerifications tests
* Fix syntax error in image widths
* [ussuri][goal] Drop python 2.7 support and testing
* Migrate grenade jobs to py3
* Fix misspell word
* fix invaild link of installation guide in
Sahara UI User Guide * Switch to Ussuri jobs * grenade: start from train, disable heat integration tests * Python 3 fixes * Update master for stable/train 11.0.0.0rc1 ----------- * Update the constraints url * Add more cross-functional jobs (TripleO, OSA) * Fix unit tests: no more cinderclient v1 * Fixing broken links and removing outdated driver * Update api-ref location * Remove a monkey-patching workaround for python < 2.7.3 * Limit envlist to py37 for Python 3 Train goal * Imported Translations from Zanata * Imported Translations from Zanata * Bump the openstackdocstheme extension to 1.20 * devstack: do not use configure\_auth\_token\_middleware * Blacklist python-cinderclient 4.0.0 * Remove some files not worth maintaining * Update keystone\_authtoken config reference * Fix requirements (bandit, sphinx, jsonschema) and jobs * Update Python 3 test runtimes for Train * Add a required dep to fix the buildimages jobs * [Trivial fix]Remove unnecessary slash * doc: additional git.openstack.org->opendev.org replacement * Replace git.openstack.org URLs with opendev.org URLs * OpenDev Migration Patch * Dropping the py35 testing * Replace openstack.org git:// URLs with https:// * Imported Translations from Zanata * Update master for stable/stein 10.0.0 ------ * doc: refer to the split plugin documentation * Making Sahara Python 3 compatible * grenade: re-enable, really test rocky->master * Fix the lower-requirements job: libpq-dev, psycopg 2.7 * Add missing ws seperator between words * Use authorize instead of enforce for policies * Fixing policies inconsistencies * Add API v2 jobs (scenario, tempest); buildimages fixes * add python 3.7 unit test job * Adapt to the additional rules from pycodestyle 2.5.0 * Fixing NTP issues for CDH plugin * Adding spark build image job * Changing hdfs fs to hdfs dfs * Dynamically loading plugins * Add missing ws separator between words * Make sure that default\_ntp\_server option is exported * Fix version discovery for Python 3 10.0.0.0b1 ---------- * Prepare Sahara core for plugin split * Declare APIv2 stable and CURRENT * Give the illusion of microversion support * Some polish for APIv2 * API v2: fix "local variable 'c' referenced before assignment" * APIv2 - Fix 500 on malformed query string on * Enhance boot from volume * APIv2 - api-ref documentation for APIv2 * Deploying Sahara with unversioned endpoints * Fix validation of job binary with Python3 * Migrate away from oslo\_i18n.enable\_lazy() * APIv2 Changing return payload to project\_id * Fixing cluster scale * doc: Fix the snippet in "The Script Validator" section * String-related fixes for Python 3 * fixed word error * Add DEBIAN\_FRONTEND=noninteractive in front of apt-get install commands * Bump the version of hacking to 1.1.0, with few fixes * Update devel info: mailing list, meeting time * Update http link to https * Add python 3.6 unit test job * Add framework for sahara-status upgrade check * doc: restructure the image building documentation * Fixing image validation for Ambari 2.3 * Cleanup tox.ini constraint handling * Increase the startup time of ambari-server to 180s * Increment versioning with pbr instruction * Fix a typo on Storm plugin cluster info (Strom -> Storm) * sahara-image-pack: use curl for tarballs.openstack.org * sahara-image-pack: remove bashisms from shell scripts * adds unit test for ssh\_remote.replace\_remote\_line * Force the format of ssh key to PEM, at least for now * Add template param for ambari pkg install timeout * Use templates lower-constraints, update cover job * 
grenade: relevant fixes for master (sahara-api/apache) * doc: update distro information and cloud-init users * Fixed link for more information about Ambari images * Correct repo\_id\_map for hdp 2.5 * Make sahara-grenade job voting on the "gate" queue too * Import the legacy grenade sahara job * Correct Hbase ports in Ambari plugin * Fixing anti-affinity for Sahara * add python 3.6 unit test job * switch documentation job to new PTI * import zuul job settings from project-config * Imported Translations from Zanata * Update reno for stable/rocky 9.0.0.0rc1 ---------- * Imported Translations from Zanata * Adapt to Keystone changes: use member instead of Member * Add some S3 doc * Enable also ambari by default in devstack * Another small fix for cluster creation on APIv2 * S3 data source URL format change * Sets correct permission for /etc/hosts * Fixing cluster creation on APIv2 * Allow overriding of /etc/hosts entries 9.0.0.0b3 --------- * Enable mutable config in sahara * Adding Ambari 2.6 to image pack * Adding Storm 1.2.0 and 1.2.1 * Unversioned endpoint recommendation * api-ref: move to a v1.1 sub-folder * Trivial: Update Zuul Status Page to correct URL * Switch make\_json\_error back to being a function * Final fixup to APIv2 responses * Deprecate sahara-all * Switch hive\_enable\_db\_notification's default value * S3 data source * Switch the coverage tox target to stestr * Updating Spark versions * Fixing extjs check on cdh and mapr * Switch ostestr to stestr * Bump Flask version according requirements * Fix flask.request.content\_length is None * Use register\_error\_handler to register make\_json\_error * Boot from volume * Remove any reference to pre-built images * Updating plugins status for Rocky * Adding CDH 5.13 * Replace the deleted keypair in clusters for API v2 * Better default value for domain in swift config * Improve force delete * Updated oozie version * Fix the code repository for clone action * add release notes to readme.rst * doc: light cleanup of the ironic-integration page * doc: external link helper for other projects' doc * Update the command to change the hostname 9.0.0.0b2 --------- * fix tox python3 overrides * Check node processes earlier * [APIv2]Consolidate cluster creation endpoints * Add support to deploy hadoop 2.7.5 * Restore Ambari with newer JDK security policies * Fixing java version for Ambari * Switch from sahara-file to tarballs.o.o for artifacts * Deploy using wsgi by default * Fix: really install extjs in CDH images at build time * doc: add the redirect for a file recently renamed * Fix the detection of scala version (now https) * Fix the installation of Swift Hadoop connector (Ambari) * Fix the installation of the Swift Hadoop connector (CDH) * fix a typo: s/avaliable/available * Remove the (now obsolete) pip-missing-reqs tox target * Replace Chinese punctuation with English punctuation * Fix the openstack endpoint create failed * Fix: always use kafka 2.2 for CDH 5.11 * Adding Ambari missing versions 9.0.0.0b1 --------- * Extend config-grabbing magic to new oslo.config * Adding ntpdate and Scala to mapr image * Change doc registering-image image message * Remove step upload package to oozie/sharelib * uncap eventlet * Fix MapR dependency on mysql on RHEL * correct lower-constraints * Support of HDP 2.6 * Follow the new PTI for document build * Updated from global requirements * add lower-constraints job * File copy timesout when file is too big * Preload soci-mysql and soci on RHEL7 images * Migration to Storyboard * Updated from global 
requirements * Updated from global requirements * Updated from global requirements * Adding support for RHEL images * Remove unused module * change python-libguestfs to python-guestfs for ubuntu * Updated from global requirements * Imported Translations from Zanata * Updated from global requirements * Update mysql connection in configuration-guide.rst * Imported Translations from Zanata * Fix Spark EDP job failed in vanilla 2.8.2 * Fix documents title format error * Migrate the artifact link to sahara-extra, use https * Updated from global requirements * Updated from global requirements * Adding Ambari 2.4.2.0 to image gen * Native Zuul v3 jobs (almost all of them) * Change some parameters to be required in api-ref * Fix the parameter in api-ref * Imported Translations from Zanata * Update reno for stable/queens 8.0.0 ----- * Small doc fixes found during doc day * Fixes for the dashboard guide (title, formatting) * Adding Storm doc * Switch sahara swift to work with keystone v3 * Replace chinese quotes * EDP doc: de-emphasize job binary internals (not in v2) * Enable hacking-extensions H204, H205 * Adding sahara-policy-generator.conf * use . instead of source 8.0.0.0b3 --------- * Add support to deploy Hadoop 2.8.2 * Tweak Sahara to make version discovery easier * Various server-side fixes to APIv2 * Fix Flask error\_handler\_spec * Dynamically add python version into launch\_command * Updated from global requirements * Remove use of unsupported TEMPEST\_SERVICES variable * Replace assertFalse/assertTrue(a in b) * Stop abusing [keystone\_authtoken] * Update url links in doc files of Sahara * Updated from global requirements * Changing expected value to job\_template\_id * Updated from global requirements * Updated from global requirements * add bugs link in README.rst * Image generation for MapR * Force deletion of clusters * Rename 'SAHARA\_AUTO\_IP\_ALLOCATION\_ENABLED' config parameter * Use default log levels overriding Sahara-specific only * Decommission of a specific node * Updated from global requirements * RHEL: fix distro detection and EPEL configuration * S3 job binary and binary retriever * Updated from global requirements * Updated from global requirements * Updated from global requirements * [APIv2]Enable APIv2, experimentally 8.0.0.0b2 --------- * Fix scaling validation error * [APIv2]Add ability to export templates to APIv2 * Upgrading Spark to version 2.2 * Updated from global requirements * Updated from global requirements * Remove extra "$" in sahara-on-ironic.rst * [APIv2]Nix custom OpenStack-Project-ID header * Revise the installation guide * [APIv2] Remove job-binary-internal endpoint * Updated from global requirements * Update designate manual installation URL * Update Anti-affinity Feature description * Remove use\_neutron from config * Add kolla installation guide * Update hadoop's distcp command URL * Updated from global requirements * Remove setting of version/release from releasenotes * Updated from global requirements * Update RDO URL * Updated from global requirements * Add ZooKeeper support in Vanilla cluster * Incorrect indent Sahara Installation Guide in sahara * Updated from global requirements * Spark History Server in Vanilla auto sec group * Image generation for CDH 5.11.0 * Use non corrupted libext from image * Policy in code for Sahara 8.0.0.0b1 --------- * Image generation for CDH 5.9.0 * TrivialFix: Redundant alias in import statement * Add Cluster validation before scaling * Image generation for Ambari Plugin * Add NGT resources validation before 
scaling cluster * Fix typo in advanced-configuration-guide.rst and manager.py * Updated from global requirements * devstack plugin: set two parameters required by Keystone v3 * Allow cluster create with no security groups * Fix Storm 1.1.0 EDP configs * Remove SCREEN\_LOGDIR from devstack setting * Updated from global requirements * Add default configuration files to data\_files * Updated from global requirements * Document glance and manila options in the sample config file * Updated from global requirements * architecture: remove the references to Trove and Zaqar * Re-add .testr.conf, required by the cover test * Updated from global requirements * [ut] replace .testr.conf with .stestr.conf * Fix instances schema doesn't sync with nova instance * fix duplicated ntp configuration * Auth parameters: accept and set few default values * grenade: do not use the removed glance v1 API * Updated from global requirements * Add docs about template portability * Updated from global requirements * Add export of cluster templates * Optimize model relationships (avoid joins, prefer subquery) * writing convention: do not use “-y” for package install * Fix to use "." to source script files * Replace http with https for doc links in sahara * Updated from global requirements * Updated from global requirements * Fix CDH default templates * Fix invalid JSON for Vanilla default cluster template * doc: point to the main git repository and update links * Updated from global requirements * Updated from global requirements * Add CDH validation for attached volume size * doc: generate the list of configuration option * Cleanup the last warning on doc building (html and man) * bindep: depends on gettext (release notes translations) * Imported Translations from Zanata * Update reno for stable/pike 7.0.0.0rc1 ---------- * Adding reno regarding ironic support * Fully switch to keystone authtoken parameters * Fix the broken links * Fix unimplemented abstractmethod * Updated from global requirements * enable heat during devstack installation * Better keystonemiddleware log level * Restructure the documentation according the new spec * Deprecate Spark 1.3.1 * Fix TypeError when get resource list * Fix UnicodeEncoding Error * Enable some off-by-default checks * Fix error during node group template update 7.0.0.0b3 --------- * Updated from global requirements * Support of CDH 5.11.0 * Fix export of node group templates * Bad request exception for unsupported content type * Updated from global requirements * Updated from global requirements * Updating default templates * Updated from global requirements * Image generation for CDH Plugin * Updated from global requirements * Updated from global requirements * Update the documentation link for doc migration * Globalize regex objects * Update Documention link * Updated from global requirements * Enable warnings as errors for doc building * Regenerate sample.config, included in the doc * Fixes the "tox -e docs" warnings * Add export of node group templates * Enable H904 check * Allow proxy\_command to optionally use internal IP * doc: update the configuration of the theme * Update log translation hacking rule * Updated from global requirements * Fix direct patches of methods in test\_versionhandler.py * Add test to sahara/plugins/vanilla/hadoop2/scaling.py * Add test to sahara/plugins/vanilla/hadoop2/run\_scripts.py * doc: switch to openstackdocstheme and add metadata * Fixes a typo in quickstart.rst * Updated from global requirements * Fix wrong patch in unit tests * Updated 
from global requirements * remove workaround in grenade * Add test to sahara/plugins/vanilla/hadoop2/starting\_scripts.py * Add test to edp\_engine.py * Update dashboard doc * Add test to sahara/plugins/vanilla/hadoop2/oozie\_helper.py * Add test to sahara/plugins/vanilla/hadoop2/config\_helper.py * Add test to sahara/plugins/vanilla/v2\_7\_1/config\_helper.py * Updated from global requirements * Updated from global requirements * Add test to sahara/plugins/vanilla/v2\_7\_1/versionhandler.py * Fixed grenade job * Remove deprecated oslo\_messaging.get\_transport 7.0.0.0b2 --------- * Updated from global requirements * Updated from global requirements * Updated from global requirements * Use neutronclient for all network operations * Changing reconcile to test\_only * Raise better exception for Spark master validation * Support cinder API version 3 * Updated from global requirements * Remove ancient mailmap * Fix the tox environment used for image building * Trivial fix typos in documents * Basic script for pack-based build image * Remove usage of parameter enforce\_type * [APIv2] Refactor job cancel operation * [APIv2] Refactor job refresh status * Updated from global requirements * \_get\_os\_distrib() can return 'redhat', add mapping (2) * [APIv2] Rename oozie\_job\_id * Updated from global requirements * Fixing env vars within bash scripts for image gen * added timeout function in health check function * Remove log translations * Updated from global requirements * Fix doc generation for Python3 * Refactor unit test of cdh plugin * Refactor rest of CDH plugin code * refactor CDH db\_helper * Remove outdated judgment statement * Inefficient validation checks * Remove log translations * [APIv2] Rename hadoop\_version 7.0.0.0b1 --------- * Remove log translations * Adding labels support to Storm * Added support to Storm 1.1.0 * Remove log translations * [Trivial] Remove redundant call to str * Add sem-ver flag so pbr generates correct version * Upgrading Spark version to 2.1.0 * [storm] improve nimbus validation * \_get\_os\_distrib() can return 'redhat', add mapping * Updated from global requirements * [APIv2] Convert update methods to use PATCH * Use HostAddressOpt for opts that accept IP and hostnames * Apply monkeypatching from eventlet before the tests starts * install saharaclient from pypi if not from source * Fix some reST field lists in docstrings * Adds information about using bash to documentation * Deprecate CDH-5.5.0 * Code integration with the abstractions * Remove old oslo.messaging transport aliases * Add ability to install with Apache in devstack * Replaced uuid.uuid4 with uuidutils.generate\_uuid() * Support Job binary pluggability * Fix logging inside of devstack plugin * Add missing tests to ambari/configs.py * Updated from global requirements * Updated from global requirements * Support Data Source pluggability * Add missing tests to plugin ambari * Removing the cdh 5.0,5.3 and 5.4 * Add missing test to ambari client * cors: update default configuration * Indicating the location tests directory in oslo\_debug\_helper * [APIv2] Refactor job execute endpoint * Fixes python syntax error * Remove unused logging import * [APIv2] Further rename endpoint of jobs & job\_executions * Fix api-ref build * Adding missing tests to utils/test\_cluster.py * Update validation unit test for all Vanilla processes * Updated from global requirements * [Fix gate]Update test requirement * Backward slash is missing * Add missing tests to utils/proxy.py * Updated from global requirements * Add 
missing tests to test\_trusts.py * Respect Apache's trademark as per docs * Changed the spelling mistake * Fixing manila microversion setting in sahara.conf * Configure the publicURL instead of adminURL in devstack * Fixing Create hbase common lib shows warnings * Adding missing tests to ambari test\_client * Add missing test to api/middleware/auth\_valid.py * add test to plugins/ambari/client.py * Remove doc about config option verbose * Adding test\_validate() to storm plugin test * Updated from global requirements * [Doc] Update supported plugin description * Updated from global requirements * Improving tests for plugin utils * Add test\_get\_nodemanagers() * [APIv2] remove a method that places in wrong file * [APIv2] Migrate v1 unit test to test v2 API * Updated from global requirements * Add test\_get\_config\_value() * [Doc] Fix error in docs * Add test\_add\_host\_to\_cluster() * Remove support for py34 * Add test\_get\_port\_from\_address() * [Api-ref] fix description of response parameters * Add test\_move\_from\_local() * add test\_parse\_xml\_with\_name\_and\_value() * Prepare for using standard python tests * Fixing epel-release bug on MapR cluster installation * Update reno for stable/ocata * Replacement of project name in api-ref 6.0.0 ----- * Fix unexpected removing of deprecating flag for MapR 5.1 * Remove MapR v5.0.0 * Add Kafka to MapR plugin * Fix Maria-DB installation for centos7 * Add new service versions to MapR plugin * Extend cluster provision logging of MapR plugin 6.0.0.0b3 --------- * Updated from global requirements * Updated from global requirements * [APIv2] Update registry images tagging * Updated from global requirements * Change link to mysql-connector for Oozie in MapR plugin * Fix links in tests docs * API: Updating error response codes * Add HBASE MASTER processes number validation * Updated from global requirements * Fix some doc and comments nits * Updated from global requirements * Updated from global requirements * Add test\_natural\_sort\_key() * Remove unexpected files * Updated from global requirements * Add test\_update\_plugin() * Fixing test\_cluster\_create\_list\_update\_delete() * fix syntax errors in labels.py * Set access\_policy for messaging's dispatcher * Add reno for CDH 5.9 * support of CDH 5.9.0 * Removing "def" from the methods at edp.spi * support of HDP 2.5 * Updated from global requirements * Update "Additional Details for MapReduce jobs" docs * Judgment error * Fix typo error * Adding tenant\_id to regex\_search * Correct the unit test in V5\_5\_0 * Adding tenant\_id to regex\_search * modify useless assertions * Updated from global requirements * Fix typo in cover.sh * Updated from global requirements * fix some typos 6.0.0.0b2 --------- * Problem about permission * Switch use\_neutron=true by default * Use assertGreater(len(x), 0) instead of assertTrue(len(x) > 0) * Updated from global requirements * Replace logging with oslo\_log * replace 'assertFalse' with 'assertNotEqual' * [DOC] Beutify the chapter 'sahara on ironic' * Updated from global requirements * Updated from global requirements * Updated from global requirements * Updating list of plugins in config sample * Fix error of CDH plugin scale up more than one node * Show team and repo badges on README * Updated from global requirements * spelling fixed * definition spelling mistake * fix creation of endpoints * Updated from global requirements * Fixing endpoint type for glance client * Fixed some typos. 
Trivial fixes * Updated from global requirements * Provide context for castellan config validation * totally changed requred to required 6.0.0.0b1 --------- * Fix import of common libraries from Manila client * Catch correct exception in check\_cinder\_exists fct * Remove enable\_notifications option * Updated from global requirements * Updated from global requirements * Replaces uuid.uuid4 with uuidutils.generate\_uuid() * Updated from global requirements * Updated from global requirements * Fix remove not existed devices * Updated from global requirements * Fix check cinder quotas * OpenStack typo * No doctext in some ProvisioningPluginBase methods * Updated from global requirements * Fix a typo in rootwrap.conf * Fix a typo in devstack.rst * [Trivial Fix]Fix typo in test\_images.py * Constraints are ready to be used for tox.ini * Use http\_proxy\_to\_wsgi middleware * Fix response code for invalid requests * Replace 'sudo pip install' with pip\_install * Improves anti-affinity behavior in sahara * Correct the spelling error * [api-ref] Fix missprints in response codes * Enable release notes translation * Updated from global requirements * Fix wrong URL to castellan’s documentation * Remove html\_static\_path from api-ref * Fix wrong message formats * Fix typo in comment * tenant replaced to project in doc * Updated from global requirements * Fixed some fonts issue in user doc, EDP section * Remove unused config.CONF * Updated from global requirements * Updated from global requirements * Updated from global requirements * Fix API compatibility issue * Updated from global requirements * Fix incorrect event log for ambari * [DOC] update doc about restapi * [DOC] update doc about sahara features * [doc] added description about plugin management * [DOC] Update quickstart guide * [DOC] update userdoc/edp.rst * Updated from global requirements * [DOC] update doc about mapr plugin * Add workaround for Hue on CentOS 7 * [DOC] update doc about config recommendations * [DOC] update configuration guide doc * Fix ZooKeeper check for CentOS 7 * Fill tempest.conf with Sahara-specific values * [DOC] update index and architecture docs * Updated Sahara architecture diagram * [DOC] Fix misprint in userdoc/statuses.rst * [DOC] update installation guide doc * [DOC] update doc about spark plugin * [DOC] update overview doc * [DOC] update doc about ambari plugin * [DOC] update upgrage guide * [DOC] update guest requirements doc * [DOC] Update Dashboard user guide * [DOC] Update dashboard dev environment guide * Update reno for stable/newton * Documentation fixes and updates for devref 5.0.0.0rc1 ---------- * [DOC] update doc about advanced configuration * Update link reference * [DOC] update doc about vanilla image builder * [DOC] update doc about vanilla plugin * do not use artifacts at sahara files * fix docs env * [doc] change location of swiftfs jars * [DOC] update doc about cluster statuses * [DOC] update doc about registering image * write docs about enabling kerberos * [DOC] update doc about CDH image builder * [DOC] update user doc about CDH plugin * [Doc] Small fixes according to Spark on Vanilla supporting * [Ambari] fix Ubuntu deploy * Remove entry point of sahara tempest plugin * Updated from global requirements * Remove Tempest-like tests for clients (see sahara-tests) * Deprecate MapR 5.1.0.mvr2 * Add repo configs * standardize release note page ordering * reimplement oozie client as abstract * allow configuration of strategy for UI * [DOC] Add docs about pagination abilities * Add MapR core 5.2 
* [api-ref] Stop supporting os-api-ref 1.0.0 * Add new version pack for services * Add event log for HDP plugin * Update api-ref docs for Designate feature * Add Sentry service v1.6 to MapR plugin * Add custom health check for MapR plugin * Rename all ClusterContext variables to 'cluster\_context' * Replace mfs.exchange with g.copy\_file where it is possible * [DOC] Update user doc about Designate * [DOC] Fix misprints in api-ref * Spark on Vanilla Clusters * Added rack awareness in CDH plugin * [Doc] add description of "plugin update" to api ref 5.0.0.0b3 --------- * Updated from global requirements * Remove support for Spark standalone * Remove ssl config for Hue * Refactor service home dir owner setting * [Ambari] More flexible auto configuration * Fix invalid security repo * Added rack awareness in HDP plugin * Updated from global requirements * use \_LE() to wrap the error message * Added option to disable sahara db for storing job binaries * Config logABug feature for Sahara api-ref * Remove unused config.CONF * improve logging for job execution failure * Updating DOC on floating IPs change * Updated from global requirements * Fix wait conditions with SSL deployments * Enabling MapR on CentOS7 * Updated from global requirements * Fix wrong instance count in provision events * [doc] Fix some problems in docs * delete unused LOG in some files * TrivialFix: Remove logging import usused * Fix mapr cluster deployment * Remove MAPR\_USER variable * Delete useless 'pass' * Updated from global requirements * replace assertListEqual() to assertEqual() * Updated from global requirements * Error handling during hosts file generation * Replace 'lsb\_release -is' with the method 'get\_os\_distrib' * Add auto configs to HDP plugin * Correct reraising of exception * Fix wrong epel version for CentOS 7 * Clean imports in code * Adding release note to floating ips change * Updated from global requirements * Remove hardcoded password from db schema * Get ready for os-api-ref sphinx theme change * Replace old CLI calls in grenade * Updated from global requirements * Add Kafka to CDH 5.5 and CDH 5.7 * Updated from global requirements * plugins:patch is now admin only operation * Fix small bugs in pagination * Fix wrong hue-livy process name and move installation * Fix wrong owner setting for config files * copying oozie.warden to prevent failure * Updated from global requirements * Image argument validation and declaration * [ambari] support kerberos deployment * [cdh] kerberos support implementation * kerberos infra deployment impl * Fixed the error with updating the job via command line * Add sorting ability to Sahara-API * Health check for Designate * Fix configs for repos and swift urls in CDH 5.7 * Added documentation for Designate feature * Documentation for image gen CLI and framework * Updated from global requirements * Updated from global requirements * Updated from global requirements * Designate integration * Updated from global requirements * Correct reraising of exception * Updated from global requirements * Updated from global requirements * Updated from global requirements * labels for CDH plugin * Changing zookeeper path while updating conf * labels for MapR plugin * Remove hardcoded password for Oozie service * Refactor the logic around use of floating ips * Adding argument-related validators for image configurability * Configuration engine for image generation CLI * Use assertEqual() instead of assertDictEqual() * improve error message for execution with retries * remove infrastructure 
engine option * Add pagination ability to Sahara-API * [DOC] Added docs for sahara+ironic * [DOC] Inform operators about limited quotas * delete two unused LOG * Updated from global requirements * Remove unused LOG * Updated from global requirements * Fixing unit tests for image create * improved scaling for cdh plugin * Adding Pyleus configs to Storm plugin * Add Python 3.5 classifier and venv * Docs should use "--plugin-version" instead of "--version" * CLI for Plugin-Declared Image Declaration * make ability to return real plugins in list ops * Failed to download ext-2.2.zip from dev.sencha.com * Adding Python Jobs using Pyleus * Simplify tox hacking rule to match other projects * [DOC] Cleanup time for incomplete clusters * improvements on api for plugins 5.0.0.0b2 --------- * Resolves issue where to allow custom repo URLS * Updated from global requirements * Updated from global requirements * don't serialize auto security group if not needed * Fix typo in ambari\_plugin.rst * replace import of future to the top * fix building api ref docs * The addition of the parentheses for py3 * [DOC] Update installation guide * use sessions for creating heatclient * Fixed spelling error * forbid cluster creation without secondarynamenode * Fix subdirectory typo in sahara db template Readme file * Updated from global requirements * Upgrade Storm plugin to version 1.0.1 * Updated from global requirements * Add Impala 2.2 to MapR plugin * Support of CDH 5.7 * fixing sahara-engine setup in devstack * Fix typo in configs\_recommendations.rst * Remove outdated tools * [DOC] improve docs * Fix typo in cdh\_plugin.rst * Fix glanceclient.v2.images * Remove unecessary decorators from private interface * Ignore Nova config drive in devices list * plugins api impl * sleep before waiting requests * allow to specify notifications transport url * ability to configure endpoint type for services * Updated from global requirements * novaclient.v2.images to glanceclient migration * Updated from global requirements * Update documentation for hadoop-swift * Updated from global requirements * Updated from global requirements * [DOC] updated docs about keystone cli * Trivial: Fix wrong button name in dashboard user guide * Updated from global requirements * implement db ops for plugin's api * replace seriailization of plugin to PluginManager * Moving WADL docs to Sahara repository * Remove convert to cluster template feature * Trivial: Remove useless words in CDN image builder doc * Updated from global requirements * remove ability to create barbicanclient * Fix the ca certificate handling in the client sessions * fix grenade from mitaka upgrade * remove config groups associated with removed hosts * Updated from global requirements * workaround to fix ambari start on centos7 * Updated from global requirements * Fix provision events for installing services * New version of HDP plugin 2.4 * Display credentials info in cluster general info * Updated from global requirements * Improve timeout message when cluster create fails * Updated from global requirements * Modify HDP plugin doc for Ambari plugin 5.0.0.0b1 --------- * Fix retrieve auth\_url and python 3 jobs * Readable logging for Heat templates * Use split\_path from oslo.utils * Added "\" In quickstart guide * Corrects MapR distro selection for RHEL * Fix cluster creation with another tenant * Updated from global requirements * Added unit tests for CDH 5.5.0 deploy file * Updated from global requirements * [Trivial] Remove unnecessary executable privilege * Updated 
from global requirements * Code refactoring of ambari deploy processes * Fix down scaling of ambari cluster * HDP hive HDFS support * improve description of ambari plugin * Remove hdp 2.0.6 plugin * Updated from global requirements * Fix grenade * Updated from global requirements * Minimise number of auto security groups * remove verbose option in devstack plugin * use the only method to initialize client * Updated from global requirements * Resolve bug with long value to RANDSTR function * Change 'Hbase' to 'HBase' string in spark service * Updated from global requirements * Remove openstack/common related stuff * Added unit tests for ha\_helper file * Updated from global requirements * Updated from global requirements * Fix typo in Spark service * Renamed job execution and templates endpoints * Fix doc about scenario and Tempest tests * keystoneclient to keystoneauth migration * Helper method to use dnf instead of yum on fedora >=22 * PrettyTable and rfc3986 are no longer used in tests * Update the links to the RDO project * Focus the documentation on distributed mode * Updated from global requirements * cdh plugin yum install option "-y" missing * update options mentioned in tempest readme * Update hadoop swift docs * Updated from global requirements * Fix doc build if git is absent * Added new unittest to oozie module * Updated from global requirements * SPI Method to Validate Images * Added tests for sahara cli * Fix unavailable MCS link * Define context.roles with base class * Update the Administrator Guide link * Updated from global requirements * Updated from global requirements * Change property for auto creating schema * Remove unsupported services from 5.1.0 * Updated from global requirements * Updated from global requirements * Bandit password tests * Workaround for temporary Oozie bug * Fixing the bandit config * Pkg installation to ssh\_remote * fix syntax error in ui dev docs 4.0.0 ----- * Set libext path for Oozie 4.0.1, 4.1.0 * rename service api modules * Fixing grenade job * Add hadoop openstack swift jar to ambari cluster * Fix Hue integration with Spark and Hive * Move bandit to pep8 * Revert "Remove PyMySQL and psycopg2 from test-requirements.txt" * Do not build config example for readthedocs.org * Remove PyMySQL and psycopg2 from test-requirements.txt * Correctly configure Spark with Hive, HBase * Set libext path for Oozie 4.0.1, 4.1.0 * Add hive property for Hue < 0.9.0 * Updated Sahara arch diagram * Fix incorrect visualization of MapR versions * Updated volumes section in docs * Update reno for stable/mitaka * Update .gitreview for stable/mitaka 4.0.0.0rc1 ---------- * Updated UI docs * Fix staled configs for ha deployments * Use auth admin for get\_router when building proxy commands * Don't use precreated ports in heat templates * get\_admin\_context overwriting context * Inject drivers to jars in Ambari Spark engine * Deprecate HDP 2.0.6 plugin * Fix updating datasource without changing a name * register the config generator default hook with the right name * Fix a mess in config helpers * rewrite wait condition script * Run cluster verification after cluster / creation scaling * Fix HA for Resourcemanager * Add an extra copy of neutron info after run\_job * Remove cinder v1 api support * Updated from global requirements * Updating quickstart guide with openstackclient usage * Fix MapR 500 tempest test fails * Moved CORS middleware configuration into oslo-config-generator * Add MapR 5.1.0 * Fix blueprints configuration for HA 4.0.0.0b3 --------- * Do not use 
explict keyword arguments in image resource * Improve exception message for wait\_ambari\_requests * Added #nosec to sahara.service.coordinator package * Added #nosec to sahara.utils.hacking package * add nosec to subprocess usage in launch\_command * add nosec to remote ssh pickle usages * Refine the code for CDH PluginUtils class * Remove UI configuring for Oozie * Updated from global requirements * HA for NameNode and ResourceManager in HDP 2.2 * move heat template version to common module * No longer necessary to specify jackson-core-asl in spark classpath * Improve config description in CDH config\_helper * Remove unneeded version string check in CDH plugin * Remove unused pngmath Sphinx extension * Add Flume 1.6.0 to MapR plugin * Remove vanilla 2.6.0 in doc * Remove unsupported MapR plugin versions * Updating get\_auth\_token to use keystonemiddleware * remove hdp from the default plugin list * enable ambari plugin by default * Updating dashboard user guide post-reorg * Use the integrated tempest.lib module * Update CDH user doc for CDH 5.5.0 * Add CDH 5.5 support * CDH plugin edp engine code refactoring * CDH plugin config helper refactoring * Updated from global requirements * Use ostestr instead of the custom pretty\_tox.sh * split cloudera health checks * ambari health check implementation * Making health verification periodics distributed * Fixed typo of precendence to precedence * Fix typo in api\_validator.py * Updated from global requirements * Added #nosec for bandit check * Missing ignore\_prot\_on\_def flag * Updated from global requirements * Remove support for spark 1.0.0 * [EDP] Add suspend\_job() for sahara edp engine(oozie implementation) * Updated from global requirements * Remove vanilla 2.6.0 code * Add Spark 1.5.2 to MapR plugin * Fix in wrong substitution * Adding data source update validation * Adding more information to validation errors * Revert "Fix gate pep8" * Add default templates for spark plugin, version 1.6.0 * Updated from global requirements * Add Hue 3.9.0 to MapR plugin * Add property 'MapR-FS heap size percent' to cluster template * implement sending health notifications * cloudera health checks implementation * Added scaling support for HDP 2.2 / 2.3 * base cluster verifications implementation * Check that main-class value is not null in job execution validator * Fixes to make bandit integration tests work with sahara * honor api\_insecure parameters * Replace assertNotEqual(None,) with assertIsNotNone * Start RPC service before waiting * Add support running Sahara as wsgi app * Add test cases for CDH plugin config\_helper * CDH plugin versionhandler refactoring * Add test cases for versionhandler * Remove support of HDP 2.2 * Use the oslo.utils.reflection to extract class name * Don't use Mock.called\_once\_with that does not exist * Add regex matching for job\_executions\_list() * Add regex matching for job\_binary\_internal\_list() * Python3: Fix using dictionary keys() * Await start datanodes in Spark plugin * Updated from global requirements * Add regex matching for job\_list() * Add regex matching for job\_binary\_list() * Add regex matching for node\_group\_templates\_list() * Add regex matching for clusters\_list() * Add regex matching for data\_sources\_list() * Add regex matching for cluster\_templates\_list() * add initial v2 api * add orphan to configs recommendations * add vanilla image builder docs to index * Enabling distributed periodics in devstack * Adding doc about distributed periodics * Fix gate pep8 * Added support of Spark 
1.6.0 * Distributed periodic tasks implementation * Parse properties with custom key/value separator * Updated from global requirements * Revert "Enable sahara-dashboard devstack plugin in sahara plugin" * Update bandit version * Update the devstack.rst document * Enabling cluster termination via OPS in periodics * use uppercase 'S' in word "OpenStack" * Fix spell typos * Add creation of mapr user * Fix missing configuration for mapreduce * Fix problem with zombie processes in engine * Add Hive 1.2 to MapR plugin * Add Oozie 4.2.0 to MapR plugin * Add Pig 0.15 to MapR plugin * Add Drill 1.4 to MapR plugin * Add ability for setting file mode * CDH plugin validation mudule refactoring * Add CDH plugin validation test cases * Add install priority to each service * Remove redundant tabs when add MapR repos * Remove outdated pot files * Add unit test cases for cdh plugin utils * Move notifications options into oslo\_messaging\_notifications * Updated from global requirements * Allow 'is\_public' to be set on protected resources * Add 'is\_protected' field to all default templates * Change 'ignore\_default' to 'ignore\_prot\_on\_def' * Remove overlap of 'is\_default' and 'is\_protected' for templates * correct spelling mistake * Update the link to sahara.py * Updated from global requirements * Add release notes for external key manager usage * Fix anti-affinity handling in heat engine * Where filter is not done correctly on programmatic selection * Remove scenario tests and related files * Use internal auth url to communicate with swift * Updated from global requirements * notification\_driver from group DEFAULT is deprecated 4.0.0.0b2 --------- * Migrate to new repository in gate checks * Fix python 2,3 compatibility issue with six * Fixing kwarg name for centos repository * Updated from global requirements * Fix using regions in all OS clients * Add release notes for scheduling EDP jobs * remove openstack-common.conf * Updated from global requirements * Enable sahara-dashboard devstack plugin in sahara plugin * Add a common Hive and Pig config in workflow\_factory * add cdh plugin passwords to key manager * add debug testenv in tox * add developer documentation about the key manager * Updated from global requirements * add helper functions for key manager * Setting auth\_url for token auth plugin object * Replace deprecated library function os.popen() with subprocess * Enable passwordless ssh beetween vanilla nodes * Added Keystone and RequestID headers to CORS middleware * Removed redundant list declaration * Updated from global requirements * Change assertTrue(isinstance()) by optimal assert * Fix wrong file path in scenario test README.rst * Updated from global requirements * Use run\_as\_root instead of sudo to execute\_command * Ensure default arguments are not mutable * Compare node groups in CDH plugin IMPALA validation * Add translation for log messages * Fixing cinder check with is\_proxy\_gateway * Update HA scenario for CDH * Use cfg.PortOpt for port option * Clean the code in vanilla's utils * [EDP] Add scheduling EDP jobs in sahara(oozie engine implementation) * Adding doc about data source placeholders * Remove she-bang from sahara CLI modules * Stop using unicode builtin * Initial key manager implementation * Move c\_helper, db\_helper into \_\_init\_\_ for CDH plugin\_utils * Updated from global requirements * Added check for images tags * Updated from global requirements * Replace assertEqual(None, \*) with assertIsNone in tests * Updates DevStack git repo link in Sahara Dev 
Ref * Implement custom check for Kafka Service * Don't configure hadoop.tmp.dir in Spark plugin * Updated from global requirements * Deprecated tox -downloadcache option removed * Updated from global requirements * Scenario templates: make is\_proxy\_gateway configurable * Added several parametrs to priority-one-confs file * Add CDH plugin edp engine unit tests * Add missing i18n module into CDH plugin edp\_engine * Add ability to get auth token from auth plugin * Trust usage improvements in sahara * Replacing all hard coded cluster status using cluster\_utils * Always enable heat service in devstack plugin * Remove unused code from volumes module * Updated from global requirements * Now updating cluster templates on update * Add log when directly return from cancel\_job * Updated from global requirements * remove the qpid message driver from the configuration file * Adds nosec to system call in NetcatSocket.\_terminate * rewrite heat client calls * Remove MANIFEST.in * Updated from global requirements * refine the development environment document * test: make enforce\_type=True in CONF.set\_override * Explicitly calling start for sahara-api in sahara-all * Adding ability disable anti\_affinty check in plugin * Remove version from setup.cfg * Force releasenotes warnings to be treated as errors 4.0.0.0b1 --------- * Override verify argument of generic session * Add missed checks for testing update method * Updated from global requirements * Optimize "open" method with context manager * Launching 1 instance in grenade instead of 2 * Updated from global requirements * Fix bashate warnings * Support of Spark EDP in Ambari plugin * Check cluster if it is None before run job * Enable heat\_enable\_wait\_condition by default * Update scenario test readme file * Add more useful information to the Heat stack description * Replacing hard coded cluster status using cluster\_utils * cleanup sahara commands * Support unmounting shares on cluster update * Updated from global requirements * Mounting changed shares on cluster update * Remove unneeded 'self' in plugins.cdh.v5\_4\_0.plugin\_utils * Drop direct engine support * Remove old integration tests for sahara codebase * Option for disabling wait condition feature * Remove unneeded volume serialization * Updated from global requirements * Doc fix: use\_floating\_ip to use\_floating\_ips * change port option from Opt to IntOpt * implement is\_sahara\_enabled * Add test cases for CDH plugin versionfactory * Adding tests for checking updating of templates * Updated from global requirements * Add "unreleased" release notes page * Support reno for release notes management * Update Sahara Dev Quickstart Guide * Updated from global requirements * Fix doc8 check failures * Rename get\_job\_status to get\_job\_info in oozie.py * Updated from global requirements * Run py34 first in default tox run * Updated from global requirements * Use oslo.service for launching sahara * Disable base repos by the option * Publish sample conf to docs * refine the sahara installation guide * Move doc8 dependency to test-requirements.txt * Fix E005 bashate error * Plugin version error in scenario test for vanilla2.6.0 * Add unit test to cover cancel job operation in oozie engine * Make ssh timeout configurable * Missing stuff for Kafka in Ambari plugin * Add default templates for MapR plugin 5.0.0 mrv1 & mrv2 * Support overriding of driver classpath in Spark jobs * Add ability validate yaml files before run tests * Remove TODO line while bug 1413602 is fixed * Add CDH test 
enabling HDFS HA * Add CDH 5.4.0 contents in doc * Allowing shares to be edited on cluster update * Remove TODO in the feature.rst * Fix a couple typo in EDP doc * Refine the overview.rst for sahara * Fix Spark installation fails when parsing spark-env.sh * Disable security for Oozie in Ambari * Remove verbose code for hive metastore schema creation in MapR plugin * Providing more information about fail job * Refine the doc for sahara * Fix magic method name in plugin.cdh.clent.type * Add additional filter to volume\_type check * Remove known issue from the doc * Use assertTrue/False instead of assertEqual(T/F) * Updated from global requirements * Fixing job execution creation with is\_protected field * Fixing cluster creation with is\_protected field * Adding ability to register image without description * Get Open Ports for Storm * Simplify the method \_count\_instances\_to\_attach * Add source url into README.rst * Updated from global requirements * Fix Mapr on ci * Fixing problem with validation of job binaries update * Fixing search of devices that need to be mount * Cleanup config-generator.sahara.conf * Updated from global requirements * Switched CORS configuration to use oslo\_config * Add batching for EDP jobs in scenario tests * Fixing event log handling during volumes mount * Fixing grenade job * Add support of Drill 1.2 to MapR plugin * Add -f option to formatting volumes for xfs * Hive job type support on CI * Add unit tests for AuthValidator middleware * Remove old sahara endpoint * Updated from global requirements * Add -f option to formatting volumes * Fix issue with job types in Ambari plugin * Fix tempest tests * Modify service-role view in creating node group template * Updated from global requirements * Add ability running tests on existing cluster * Reformat job flows * Bringing the Sahara Bandit config current * Add testresources used by oslo.db fixture * Use api-paste.ini for loading middleware * Add event logs for MapR plugin * code cleanup * Fixing grenade job for upgrades from liberty * Fix typos in developer documentation * Updated from global requirements * Fixing cluster creation without auto\_security\_group * Use distributed mode by default in devstack * Updated from global requirements * Adding ability run several edp jobs flows * Updated from global requirements * Add /usr/share/sahara/rootwrap to filters\_path * Fixing grenade\_job * replace multiple if stmts with dict and for loop * Fix the bug of "Error spelling of a word" * Fix the bug of "Error spelling of 'occured'" * Removed redundant metaclass declarations in MapR plugin * Fix of client tests in tempest * Added support for Spark 1.3.1 in MapR plugin * use list comprehensions * Cleanup databases during execution of hive example * Open Mitaka development 3.0.0 ----- * Use explicit version of image client in gates * Use xfs for formatting * Configurable timeouts for disk preparing * Generate random heat stack name for cluster * Resolve issue with operating heat stack outputs * Updating vanilla imagebuilder docs * Add more information about configuring NTP service * Fix problem with loading Ambari configs * Update indexes after adding security repo in MapR plugin * Add put data in HDFS for EDP testcase * Add wait condition for heat templates * Updated from global requirements * Change ignore-errors to ignore\_errors * Fix wrong init of ThreadGrop * Fix missed service message in MapR plugin * Heat stack creation with tags * Enable ceilometer services using new plugin model * Add spaces around function 
params for browser to linewrap on * Convert manila api version to string * [doc-day] Updated development guidelines * Adapt python client tests to use Tempest plugin interface * Python client tests: access to credentials/net clients * Formatting and mounting methods changed for ironic * HDP plugin should ignore untagged configs when creating cluster\_spec * Fixed service restart in MapR plugin * Adding check of indirect access * Fix problem with create cluster w/o internet * Improving node group templates validation * Fix incorrect function name in swift client * Adding fake plugin usage in validation ut * Update hdp plugin docs * Use get\_resource instead of Ref defenition * Create ResourceGroup with volumes only if it is required * Selects IPv4 preferentially for internal\_ip * Fix working scenario tests with swiftclient * Improving cluster templates validation * Report stack\_status\_reason on heat failure * Change nova client util to use proper client * New doc about autoconfiguration policy * Increasing time for cluster creation/deletion in grenade * cleanup spark plugin documentation * Remove mountpoint from heat stack because it always null * Fix capitalization on sahara * Updating Ubuntu Server version in devstack doc * Fixed RM HA in MapR plugin 5.0.0 MRv2 * Only add current directory to classpath for client deploy mode * Include YARN 2.7.0 to service install priority list in MapR plugin * Updated from global requirements * Documenting interface map * Set the flavor to large for the cdh 5.4.0 name node in template.conf * Use custom flavor in gate * Add SPARK\_YARN\_HISTORY\_SERVER to default templates for cdh * Register SSL cert in Java keystore to access to swift via SSL * Adding doc about shared and protected resources * Convert True to string for image registry * Fixed Hive 1.0 failure on MapR plugin * [sahara doc fix] log guidelines doc * Removed duplicated definition of support Impala in MapR plugin * Add keystone and swift url to /etc/hosts * Update plugin spi docs with new method * [sahara doc fix] guest requirements doc * [sahara doc fix] registering image doc * Enable anti\_affinity feature in scenario test * Fix mocks in scenario\_unit tests * [CDH] Fix problem with launching Spark jobs * Updating architecture doc * [sahara doc fix] update the statuses.rst in userdoc * Drop HDP 1.3.2 plugin * Drop Vanilla Hadoop 1 * Adds IPv6 support to auto security group * Updating overview document * Updating the userdoc configuration * Correcting userdoc installation guide * Minor updates to edp documentation * Modify recommend\_configs arguments in vanilla 1 * Updated from global requirements 3.0.0.0b3 --------- * Minor updates and fixes to features doc * updating index doc * updating plugins doc * Added CORS middleware to Sahara * Documentation for Manila integration * Updating userdoc overview * Add missing ssl\_verify for swift in scenario tests * [doc-day] Updated development environment guide * Updating the dashboard guide for Sahara * Updating the rest api documentation * Updating the dev environment guide for the Sahara UI * Update documentation for Vanilla plugin * Add port type on port option * Updated from global requirements * Print Heat stack before create/update to debug logs * Remove useless test dependency 'discover' * Use internalURL endpoint by default in Sahara * Use demo user and tenant for testing * Explicitly set infra engine based on job type * Use less resources in sceanrio gate job * Removed installation of Oozie sharelibs in MapR plugin * Fix problem with 
using auto security groups in Heat * adding developer docs guidelines about clients * Added HBase REST node process to MapR plugin * Disable autotune configs for scaling old clusters * Add sample spark wordcount job * Deprecate Vanilla 2.6.0 * Add additional HDP services * Add EDP services to new HDP plugin * Add base services support for HDP 2.2 / 2.3 * Adding support for the Spark Shell job * project\_name is changed to optional parameter * Change version package imports to correct in MapR plugin * Added support of Hue 3.8.1 to MapR plugin * Job execution cancel timeout * Rename oozie\_job\_id * adding neutron to sessions module * adding cinder to sessions module * Removing token information from debug log * Fix bash condition for enabling heat engine in devstack * Updated from global requirements * Enable YARN ResourceManager HA in CDH plugin * Changing scenario runner to use subprocess * Add CDH HDFS HA part in the user doc * Updated from global requirements * Fail if FAILED in the scenario tests run log * Actually install Heat for the Hest-based jobs * Ensure working dir is on driver class path for Spark/Swift * Add validation rules about IMPALAD * Remove unneeded starting ntp * Expose cloudera manager information * Updated from global requirements * adding nova to session cache * Adding Grenade support for Sahara * Update plugin version for transient tests to vanilla 2.7.1 * Updated from global requirements * New version of HDP plugin * Adding shared and protected resources support * Adding is\_public and is\_protected fields support * Use "get\_instances" method from sahara.plugins.utils * Doc, scenario tests: variables config file * Adding clusters\_update api call * Implement ability of creating flavor for scenario tests * Add support of SSL in scenario tests * Remove libevent installation from MapR plugin * Updated from global requirements * Add manila nfs data sources * Added support for MapR v5.0.0 * Run scenario tests for the fake plugin in gate * Add separated dir with fake plugin scenario for gate testing * Set missed admin user parameters used for trusts creation in devstack * Make tools/pretty\_tox.sh more informative and reliable * Make infra engine configurable in devstack plugin * Added support of Hadoop 2.7.0 to MapR plugin * Remove never executable code from devstack plugin * Scenario tests: store ssh key if resources are retained * doc, sahara-templates: fix typo * Add scenario gate testing placeholders * Adding job\_update api call * Adding job\_execution\_update api call * Adding sessions module and keystone client upgrade * Adding job\_binary\_internal\_update api call * Fix HBase config name when using HA with HDP 2.0.6 * Removed confusing typos in utils/openstack/base.py file * Remove README in sahara/locale * Update stackforge to openstack * Updated from global requirements * Fix wrong compute nodes name in doc * Adding HTTP PATCH method to modify existing resources * Allow Sahara native urls and runtime urls to differ for datasources * Support manila shares as binary store * Add script to report uncovered new lines * Increase coverage report precision * Add recommendation support to Cloudera plugin * Support placeholders in args of job for i/o * add unit test for test\_hdfs\_helper * Updated from global requirements * Update vanilla plugin to the latest version * Remove quotes from subshell call in install\_scala.sh * Fixed WebServer validation in MapR plugin * Update cluster UI info in MapR plugin * Prevent writing security repos twice in MapR plugin * Check 
ACLs before adding access for Manila share * Make starting scripts module for vanilla 2 plugin * Small refactoring for vanilla 2 * Fix MapR plugin versions loading * Put missing fields to validation schema * Remove test for job type in get\_data\_sources * add unit test cover oozie upload workflow file function * Updated from global requirements * Remove spaces from Sahara key comment * Increase internal\_ip and management\_ip column size * Drop support of deprecated 2.4.1 Vanilla plugin * Added support of Drill 1.1 to MapR plugin * Added support of HBase 0.98.12 to MapR plugin * Added support of Mahout 0.10 to MapR plugin * Added support of Hive 1.0 to MapR plugin * Add CLUSTER\_STATUS * Remove cluster status change in HDP plugin * Removed support of Hive 0.12 and Impala 1.2.3 from MapR plugin * Changed misleading function name in Heat engine * Mount share API * EDP Spark jobs work with Swift * Fix six typos on sahara documentation 3.0.0.0b2 --------- * Configure NTP service on cluster instances * Updated from global requirements * Changing log level inside execute\_with\_retries method * Updated from global requirements * Remove extra merge methods in plugins * Add configs unit test case * Change zk\_instance to zk\_instances in storm plugin * Add recommendation support for Spark plugin * Migrate to flavor field in spark 1.3.1 * Cleanup .gitignore * Ignore .eggs directory in git * Use keystone service catalog for getting auth urls * Storm job type not found * Implement recommendations for vanilla 2.6.0 * Add missing mako template for Spark 1.3.1 * Add unit test for external hdfs missed for URLs * Migrate "flavor\_id" to "flavor" in scenario tests * Remove openstack.common package * updating documentation on devstack usage * Added the ability to specify the name of the flavor\_id * [EDP]upgrade oozie Web Service API version of oozie engine * Enable HDFS HA in Cloudera plugin * Made 'files' dict as member field of ClusterStack * Changed all stacks retrieval with filtered search * Removed useless ClusterStack class from heat engine * Removed useless 'Launcher' class from heat engine * Cluster creation with trust * Add default templates for Spark 1.3.1 * Add Zookeeper and Sentry in CDH540 scenario tests * Fix README.rst in scenario dir * Fix installing python-saharaclient * Deprecate Spark 1.0.0 * Switch to the oslo\_utils.fileutils * Added failed thread group stacktrace to logs * Updated from global requirements * [CDH] Provide ability to configure gateway configs * Remove the old scenario YAML files * Derive Mako scenario templates from the current YAMLs * Improvement check scale in scenario tests * Allow multiple clusters creation * Modify launch\_command to support global variables * Allow Mako templates as input for scenario test runner * Updated from global requirements * Allowing job binary objects to be updated * Resolve 500 error during simultaneously deletion * Fix retrieve\_auth\_url in case Keystone URL does not contain port * Spark job for Cloudera 5.3.0 and 5.4.0 added * Fix problem with using volumes for HDFS data in vanilla plugin * Fix failed unit tests * Added support of Drill 0.9 to MapR plugin * Added support of Drill 0.8 to MapR plugin * Added support of HBase 0.98.9 to MapR plugin * Added support of Hue 3.7.0 to MapR plugin * [EDP] Delete edp job if raise exception * add unit test covering cancel\_job in job\_manager * Remove un-used "completed" filed when do cluster\_provision\_step\_add * Allow to specify auto\_security\_group in default templates * Add check for 
cinder in scenario tests * [HDP] Nameservice awareness for NNHA case * Return back devstack exercise to in-tree plugin * Fix devstack plugin - sahara repo already cloned * Add py34 to envlist * Remove bin/ scripts support from in-tree devstack plugin * Enable all plugins in devstack code * Add CM API support for enable hdfs HA * Add bashate check for devstack scripts * Updated from global requirements * Add in-tree Devstack plugin * Support Spark 1.3.1 * Updated from global requirements * Minor - move definition to avoid AttributeError * [EDP] Unified Map to Define Job Interface * Enable Java Keystore KMS service in CDH5.4 * [EDP][Oozie] external hdfs missed for URLs in job\_configs * Fix compatible issues in unit tests for python 3 * Use right oslo.service entry points * Updated from global requirements * pass environment variables of proxy to tox * Switch to oslo.service 3.0.0.0b1 --------- * Updated from global requirements * Add sentry check for CDH 5.3 * Allowing data souce objects to be updated * Updated from global requirements * Add method for geting instances with process * Add CDH5.4 support in sahara * Add support of custom scenario to scenario tests * Added method for connect to node and run command * Update version for Liberty 3.0.0a0 ------- * Removed dependency on Spark plugin in edp code * Removed unused filtering in get\_plugins * Refactor exception is Sahara * Removed HashableDict * Updated from global requirements * Spark doc references vanilla diskimagebuilder page * Also install alembic\_migration folder * Remove duplicate 'an' and 'the' in docs * Add policy namespace change to the upgrade guide * Transform configuration values into int or float when needed * Updated from global requirements * Add cinder volumes to mapr scenario template * Modifying Swift Paths for EDP Examples * Updated from global requirements * Fix problem with removing PID from list * Remove deprecated group name of option * Remove WritableLogger wrapper * Updated from global requirements * [CDH] Load missed configs from yarn-gateway.json * Switched from all stacks polling to filtered list * Disable neutron DEBUG logs * Fixed typo in the Oozie CL documentation * Move cluster deletion to common engine module * Fix Typo Error for "Cloudera" * Don't use reduce for python3 compatibility * Making policy namespaces more unique * Switched Heat engine to ResourceGroup use * Add "null" for fields in cluster and node group template JSON schemas * Minor - Fixed wrong log formatting * Hiding volumes from cluster output * Minor improvement of validation rules * [CDH] Add validation check about dfs\_replication * Update list of supported API versions * Fix issue with configuring HDP cluster * Add updating jobs statuses before cluster deletion * Added missing retries of clients calls * Adding retry ability to cinderclient calls * Fix typo in Sahara doc * Added checking of event-log in scenario tests * Print traceback in logs for cluster operations * Update the docs about how to build images for Sahara usage * Fix logging\_context\_format\_string input for sahara * Added validation of template names in scenario tests * Adding retry ability to heatclient calls * Enabling Swift client retries * Adding retry ability to keystoneclient calls * Adding retry ability to novaclient calls * Adding retry ability to neutronclient calls * Remove the custom duplicate check on cluster template update * Fix cluster templates update * Fix MapR Oozie dependency resolution * Fix usage volume type in Heat * Adding ability to retry 
clients calls * Remove resetting self.flavor\_id in CDH test * Remove custom duplication check for node group template update * Fixed bug with volume type validation * Improve unit tests of general utils * Implementation of Storm scaling * Adding yaml scenario file for Mapr 4.0.2 plugin * Added support of Oozie 4.1.0 to MapR plugin * Use PyMySQL as MySQL DB driver for unit tests * Extra tests for quotas * Deprecate the Direct Engine * Updated from global requirements * Improve compatible with python3 * [HDP] java64\_home not pointing at default-installed JDK for plugin * Fixed logging issues with missing ids * Add support of Mapr FS to scenario tests * Updated from global requirements * Use keystone session in new integration tests * Fix logging\_context\_format\_string input for sahara * Implemented support of placeholders in datasource URLs * Drop use of 'oslo' namespace package * Added support of Pig 0.14 to MapR plugin * Remove sqlalchemy-migrate from test-requirements * Session usage improved in sqlalchemy api * Increase edp module test coverage * Added unit tests for service/api module * Improved unit test coverage of poll\_utils * Updated from global requirements * Fix delete volume and improved conductor coverage * Improved coverage for workflow\_creator * Test coverage improvement for cluster\_progress\_ops * Test coverage improvement for sahara.service.networks * Make configurable timeouts in scenario tests * Storm EDP implementation * Fix InvalidRequestError being skipped * Remove unused code from sqlalchemy api module * Add unit tests for exceptions module * Improved unit test coverage of periodic module * Fix management IPs usage * Test coverage improvement for sahara.service.engine * Improve unit test for HashableDict * Improved test coverage for utils/resources * Change ext-2.2.zip url * Adding basic bandit config * Cleanup sqla custom types * Finally drop XML REST API related code * Improve unit test for utils/edp.py * Event log supported in new integration tests * Use ThreadGroup instead of separate threads * made a change to upgrade guide * Use correct config\_helper in Vanilla 2.6 * Add sahara\_service\_type support for auth for sahara * Updated from global requirements 2015.1.0 -------- * Add links to the public place with prepared images * Add links to the public place with prepared images * Fixing log messages to avoid information duplication * Adding cluster, instance, job\_execution ids to logs * Support x-openstack-request-id * Removing unused methods from utils.openstack.\* * Release Import of Translations from Transifex * [CDH] swift lib support * Fix slow unit test * Adding .to\_wrapped\_dict to node\_group\_template update * update .gitreview for stable/kilo * Remove duplicated codes in CDH plugin * Add scenario yaml file for fake plugin * Add handler for configuration w/o sec groups * Updated from global requirements * Minor refactor of the integration service test * Added check of scaling for Spark plugin * Install Oozie UI on MapR clusters * Adding config hints for CDH plugin * Add a brief description of the default template mechanism * Put in Sahara repo actual scenario files * Use jsonutils from oslo.serialization * Add CDH template for the scenario integration test * Restrict cluster to have at most one secondary namenode * Adding config hints for vanilla plugin * Adding config hints for HDP plugin * Add hacking checks related to logging guideliness * Date format set to be correct utc date * Fix strange check in code * Rename templates in scenario yaml 
files 2015.1.0rc1 ----------- * Updating edp json examples * Updating the developer quickstart guide * Updating sahara-ci readme * Updating edp-java readme * Updating wordcount readme * Updates to the EDP doc * Updating installation guide * Updating features documentation * Add Sahara log guideliness * Updated from global requirements * Adding documentation for guide pages in horizon * Fix libevent and epel install on MapR * Update EDP doc * How to build Oozie docs updated * Update Cloudera plugin docs * Update statuses docs * Update vanilla plugin doc * Update jenkins doc * Open Liberty development * Update Sahara 'How to Participate' doc * Update overview.rst * Update Plugin SPI doc * Update doc for adding database migrations * Add docs for event log usage * Implement cluster creation with 'quotas = unlimited' * Update testing page in developer docs * Update development.environment.rst * Update launchpad.rst * Updating advanced configuration guide * Updating EDP SPI doc * Replace current API docs with new Sahara API docs * Migrate to oslo.policy lib instead of copy-pasted oslo-incubator * Validate node groups without volumes * Updating upgrade guide documentation * Updating EDP doc * Updating configuration guide documentation * Fix mailing list in feature requests info * Add unit-tests for new integration tests * Leverage dict comprehension in PEP-0274 * Fixed issue with waiting for ssh of deleted cluster * Default templates for MapR * Default templates for CDH * Default templates for Vanilla * Default templates for Spark * Add unit tests for default templates update functionality * Add unit tests for default templates delete functionality * Add unit tests for default templates utils * Default templates for HDP * Add a CLI tool for managing default templates * Add validation in new integration tests * Adding run time of tests * Add missed configs for ThriftJobTrackerPlugin * Minor - allow changing status description of deleting cluster * Updating horizon user guide to use new terminology * Docs updated with instance locality feature * Fix common misspellings * Add usages of poll util for service modules * Switched heat engine from JSON to HOT * Set cluster mode on every node * Adding plugin version information to scenario test report * Documentation for scenario tests * Set up network client for tempest client tests * Implement job-types endpoint support methods for MapR plugin * Drop support database downgrades * Add information about cluster state in test report * Fix topology awareness configuration * Add new log messages where it's needed * Add integration tests for scaling in Spark * Updated from global requirements * Generate random password for CM admin user * Add get and update user APIs * Add scenario files for new integration tests * Fix order of arguments in assertEqual - Part1 * Notify Kerberos and Sentry do not take effect * Raise the default max header to accommodate large tokens * Sync with latest oslo-incubator * Replace direct http requests by sahara client in Quick start guide * Add usages of plugin poll - part 1 2015.1.0b3 ---------- * Fix log import error in tempest tests for Sahara * Remove the sahara.conf.sample file * Add usages of plugin poll - part 2 * Apply event-log feature for HDP plugin * Implement job-types endpoint support methods for Fake plugin * Update MapR plugin docs * MapR validation rules fixed * Fix order of arguments in assertEqual - Part3 * Fix order of arguments in assertEqual - Part2 * Implement job-types endpoint support methods for CDH plugin 
* Implement job-types endpoint support methods for Spark plugin * Implement job-types endpoint support methods for Vanilla plugin * Add Spark support for MapR plugin * Install MySQL JDBC driver along with client * Default version update for vanilla integration test * Implement poll util and plugin poll util * Minor - misprint corrected * Imported Translations from Transifex * Move updating provision progress to conductor * Add usages for step\_type field * HDP plugin: Fix Beeswax error when starting Hue * Replace empty list with scalable process in scaling * Add missed translation for exceptions in versionhandler * Switch to v2 version of novaclient * Add support for MapR v4.0.2 * Changing method for verifying existence of cinder * [HDP] Add validation check for dfs.replication * Take back upstream checks for import order * Rewrite malformed imports order * Node Groups now have id field * Update the docs for CDH plugin userdoc and image-builder doc * Add an is\_default field to cluster templates and node group templates * Move cluster template schema definition to is own file * Added support of instance locality to engines * Rewrite log levels and messages * Move node group template schema definition to its own file * Add Sentry service test in cdh plugin integration test * Add transient checks support in scenario tests * Change imports after moving tempest common code * Add Hue support for MapR plugin * Skip job\_execution tempest client test * Add a common HBase lib in hdfs on cluster start * Take back upstream checks for commit message * Imported Translations from Transifex * HDP plugin: Fix Bash error when starting Hue * Adding barbican client and keymgr module * Fix tempest tests for Sahara * Updated from global requirements * Adding CDH to the list of default plugins * Added volume\_local\_to\_instance field support * [EDP][Spark] Configure cluster for external hdfs * Add validation for cluster templates update * Implement job-types endpoint support methods for HDP plugin * Add job-types endpoint * Changed heat engine to work with objects * Implemented multi-worker solution for Sahara API * Changed wrong value for total during step creation * Adding additional validation to node group template edit * [EDP] Add Oozie Shell Job Type * check solr availability integration testing without add skip\_test * Add validation for node group templates update * Add Impala service test in cdh plugin integration test * Applying event log feature for CDH - part 3 * Imported Translations from Transifex * Updated from global requirements * Refactoring methods for terminating * Apply event-log feature for Vanilla plugins * Add Impala support for MapR plugin * Add Solr service test in cdh plugin integration test * Add Sqoop support for MapR plugin * Add CM API lib into CDH plugin codes * Fix some translator mistakes * Adding ability to edit cluster templates * Removing alpha warning on distributed mode * Add missed files for migrations in MANIFEST.in * Fix indent miss caused by f4138a30c972fce334e5e2a0fc78570b0ddb288b * Applying event log feature for CDH - part 2 * Applying event log feature for CDH - part 1 * Add support of several scenario files in integration tests * Provide ability to get events directly from cluster * Add Key Value Store service test in cdh plugin integration test * Fix tempest client tests in Sahara * Remove unused field in job\_execution table * Collect errors in new integration tests * Add Drill support for MapR plugin * Minor - changed name of argument in mirgation tests * 
Minor - Added missing check for 'Deleting' state * Add support for oslo\_debug\_helper to tox.ini * Remove unused code (timed decorator) * Updated from global requirements * Add bare images support for MapR plugin * Add concurrency support in new integration tests * Add provisioning steps to Storm plugin * Adding ability to edit node group templates * Updated from global requirements * Fix transient cluster gating * Add Flume support for MapR plugin * Fixed format mapping in MalformedRequestBody * Reorganized heat template generation code * Add check to integration tests to check event-log * New integration tests - EDP * Add provision steps to Spark Plugin * New integration tests - scaling * New integration tests - base functional * Make status description field more useful * Imported Translations from Transifex * Updated from global requirements * Added periodic clean up of old inactive clusters * Refactor MapR plugin for Sahara * Add missing database updates for cluster events * Add option to disable event log * Fix problems with provisioning steps * Removed error log for failure inside individual thread * Add Sqoop service test in cdh plugin integration test * Add Flume service test in cdh plugin integration test * Updated from global requirements * Add ability to get cluster\_id directly from instance * Changing zookeeper to not use version number * Adding validation check for Spark plugin * [Vanilla2] Open ports for hive * Improve messages for validation * Add impala shell solr package in the cdh plugin * Add efficient method for detecting installed packages * Adding hacking check to prevent old oslo namespace usage * Refactor event-log code * Imported Translations from Transifex * Updated from global requirements * Config parameters beginning with "oozie." 
should be in job properties file * Add resource quota checks for clusters * Fixed bug with spark scaling * Remove obsolete oslo modules * Remove obsolete exceptions module * Adding missed oslo import change * Separate the codes of CDH5 and CDH5.3.0 * Initialize MQ transport only once * Removing service.engine.\_log\_operation\_exception 2015.1.0b2 ---------- * Using oslo\_\* instead of oslo.\* * Added documentation for indirect VM access feature * Updated from global requirements * Fixed unit tests failures caused by missing patch stops * Updated sample config after oslo messaging update * Add Swift integration with Spark * Using oslo context as context-storage for logs * Waiting should depends on cluster state * Open port 8088 for HDP 2.0.6 * Add indirect VMs access implementation * Remove log module from common modules * Specify the package name when executing Java type edp jobs * Fixed minor errors in Sahara DB comments * Drop cli/sahara-rootwrap * Add provision step to Heat engine * Make vanilla 2.4.1 plugin deprecated * Add CDH configuration in itest.conf.sample-full * Add swift and mapreduce test after scaling in cdh integration test * Add ability to search images by name * Fix getting not registered images * Add HBase service test in cdh plugin integration test * Spark Temporary Job Data Retention and Cleanup * Updated from global requirements * Update threadgroup oslo-incubator module * Update log oslo-incubator module * Fix incorrect s/oslo/sahara/ in \_i18n * Migrate to oslo.log * Refactoring datasource, job and job\_binary name validations * Updated from global requirements * Removed EXTRA\_OPTS tuning from devstack configuration * Follow the argument order specified in spark-submit help * Change CDH plugin Processes Show\_names * Updated from global requirements * Add edp.java.adapt\_for\_oozie config for Java Action * Fix getting heat stack in Sahara * Add cleanup in the integration test gating file * fix Direct engine moves cluster to "Scaling" twice * Updated from global requirements * Refactoring swift binary retrievers to allow context auth * Add integration test for Hive on vanilla2 * Add context manager to assign events * Drop uuidutils * Add refactor to Vanilla 1.2.1 * Removed unused variable from tests * Removed sad line * Imported Translations from Transifex * Fixed context injection for RPC server * Remove useless packages from requirements * Add provisioning steps to Direct Engine * Added endpoint and utils to work with events * Enable auto security group when Bug 1392738 is fixed * Fixed issues in docs * Adding hive support for vanilla 2.6 * Use pretty-tox for better test output * Adding usage of "openstack.common.log" instead of "logging" * Updated from global requirements * Add options supporting DataSource identifiers in job\_configs * Removing warnings in the MAPR doc plugin * Hide oslo.messaging DEBUG logs by default * Add integration tests for transient clusters * Move to hacking 0.10 * Use HDFS parameter to inject swift info * Added ability to listen HTTPS port * Added ability to use other services via HTTPS * Updated from global requirements * Enable 5.3 version choice in cdh plugin * Updated from global requirements * Updated from global requirements * fix the edp and hive test issue for CDH5.3 * Refactor db migration tests * Imported Translations from Transifex * Fixes a job\_configs update by wrong value when deleting proxy-user * Adding Storm entry point to setup.cfg * Cleaned up config generator settings * Extracted config check from pep8 to separate 
env * Fixed topology parameters help in config * Fixed pep8 after oslo update (01/06/2015) * Renamed InvalidException to InvalidReferenceException * Mount volumes with options for HDFS performance * Fixed vanilla1/2 cluster not launched problem * Increase RAM for CDH master processes in CDH IT * Minor refactoring integration tests * Migrate to oslo.concurrency * Adding ability to access context from openstack.common.log * Fixed hdfs mkdir problem in vanilla1 * Add Java type edp test in integration test of CDH plugin * Enable more services in CDH plugin * Adding database detection to migration tests * Fixed pep8 after keystoneclient upgrade * Added validation on proxy domain for 'hiveserver' process * Fix oslo.db import due to move out of the namespace package * Updated from global requirements * Add one more sample for pig job examples 2015.1.0b1 ---------- * Imported Translations from Transifex * Updated from global requirements * Use xml.dom.minidom and xmlutils in unit tests * Saharaclient tests for tempest * Enable HDFS NameNode High Availability with HDP 2.0.6 plugin * All user preserve EDP objects after test * Migrate to oslo.context * Use first\_run to Start Services * Removing unecessary check * Adding Hadoop 2.6.0 support to Vanilla plugin * Fixed configs generation for vanilla2 * Fixed auto security group for nova network * Updated from global requirements * Fixed subprocess error reporting * Fixed scaling with new node group with auto sg * Update oslo-incubator periodic\_task * Update oslo-incubator threadgroup * Update oslo-incubator policy * Update oslo-incubator log * Update oslo-incubator lockutils * Removed \_i18n module, it is not used directly * Updated from global requirements * Update conf sample after oslo.messaging release * Workflow documentation is now in infra-manual * Disabled requiretty in cloud-init script * Storm integration * Fixed Fake plugin for Fedora image * Update plugin descriptions * Add integration test for Hive EDP job * [CDH] Add validation for spark * Support searching job executions by job status * Don't provide CONF to the AuthProtocol middleware * Inherit Context from oslo * Sync latest context module from oslo-incubator * Specify CDH version * Add CDH plugin documents * Added get\_open\_ports description to plugin SPI * Add list of open ports for Spark plugin * Open all ports for private network for auto SG * [CDH] Convert node group config dict * Add test for DB schema comparison * Adding uuids to exceptions * Add db/conductor ops to work with new events objects * Add new events objects to Sahara * Fix broken unit tests * changes to quickstart * Remove py26 from tox * Fixed error on attempt to delete job execution several times * Added hive support for vanilla2 * Support searching job executions by cluster name and job name * Sample JSON files for Sahara EDP APIs * Updated from global requirements * Added checks on deleting cluster * small change to edp\_spi * small change to diskimagebuilder file * Support query filtering for cluster objects * Support query filtering for templates and EDP objects * Enable auto security group for vanilla integration tests * Updated from global requirements * Format volumes filesystems in parallel * Correcting small grammatical errors in logs * Imported Translations from Transifex * Replacing data\_processing with data-processing * Updated from global requirements * Pylint check was broken after pylint update * Refactoring integration tests for Vanilla 1 plugin * Fix for getting auth url for hadoop-swift * Fixed 
bug with Hive jobs fail * Fixed pep8 after oslo.db config update * Add HBase support to CDH plugin * Add ZooKeeper support to CDH plugin * Fixed auto security group cleanup in case of creation error * Adds doc to devref quickstart document * Add list of open ports for HDP plugin * Fixed trunk pep8 errors * Disable all set of tests (every plugin) by default * Print Cloudera manager logs if integration test failed * Added ability to access a swift from vanilla-1 hive * change to devstack.rst * corrected error in dashboard\_user\_guide * corrected error in overview.rst * corrected error in vanilla\_plugin.html * Add list of open ports for Cloudera plugin * Imported Translations from Transifex * Remove unused class and arguments * Updated from global requirements * Remove oslo-incubator's gettextutils * Drop obsolete oslo-confing-generator * Add link on Hue Dashboard for CDH plugin * Explicitly specifies cm\_api version in CDH plugin * Fixed job execution update in case of proxy command * Removing Swift container support for job binaries * Fixed cluster scaling in distributed mode * Auth policy support implementation * Fix working EDP jobs with non-string configs * Fix vanilla test\_get\_configs() for i386 * Added ability to launch jobs on fake plugin * Fix Cloudera plugin with CDH packages < 5.2.0 * typo found on Sahara Cluster Statuses Overview * Fix bugs on doc registering an image * Fix bugs on Sahara overview * Fix bug on features.rst doc * Fix bug on diskimagebuilder.rst * Make proxy command generic and user-definable * Add checks in fake plugin * Add scaling opportunity for fake plugin * Imported Translations from Transifex * Install ExtJS library for CDH plugin * Fix bug on Sahara UI Dev Environment Setup * Fix dict iteration usage * Fixing validation exception for valid security group * Remove explicit set of CONF.os\_region\_name in mapr plugin tests * Correcting error in NeutronClientRemoteWrapper.\_get\_adapters * Drop some obsolete oslo-incubator modules * Fix 'Clock Offset' error in Cloudera Manager * Add Spark support to CDH * Add missed translations * Added cancel before deleting job execution * Grouped EDP endpoints by type * changes to features.rst * change to edp.rst * Flush netcat socket buffer when proxying HTTP connections * Add Hue support to Cloudera plugin * Add hash to auto security group name for uniqueness * Invalid JSON in quickstart guide * Fix argument list in NeutronClientRemoteWrapper * Fix security groups * MapR plugin implementation * Fix old style class declaration * Imported Translations from Transifex * Fix quickstart guide * Drop obsolete wsgi and xmlutils modules * Add Hive support to CDH plugin * Fix parallel testing EDP jobs for Fedora and CentOS images * Small refactoring of get\_by\_id methods * Use oslo.middleware instead of copy-pasted * Sync with oslo-incubator and removing excutils * Updated from global requirements * Adds openSUSE support for developer documentation * MapR FS datasource * Add volume type support to sahara * Correct parameter name in integration tests * Updated from global requirements * Updated from global requirements * [DOC] Add notes on disabling permissions for Data Processing * Fixed problem with canceling during pending * Remove Vanilla 2.3 Hadoop * Support Cinder availability zones * Add bashate checks * [DOC] Added multi region deployment to features list * Use new style classes everywhere * [DOC] Fixed link from upgrade guide to installation guide * [DOC] Fixed broken list in edp.spi doc * [DOC] Minor change - replaced 
external link with internal * [IT] Fix deleting transient cluster when cluster in error state * Fix bashate errors * Imported Translations from Transifex * Updated from global requirements * Moved exceptions.py and utils.py up to plugins dir * Adding support for oslo.rootwrap to namespace access 2014.2 ------ * Fix HDFS url description, and other various edits * Remove line saying that scaling and EDP are not supported for Spark * Description of job config hints in new doc page is wrong * Removing extraneous Swift information from Features * Update the Elastic Data Processing (EDP) documentation page * Add documentation on the EDP job engine SPI * Imported Translations from Transifex * Fix working Spark with cinder volumes * Fix scaling with Heat and Neutron * Fixed volumes configuration in spark plugin * Fixed cinder check for non-admin user * Make versions list sorted for Vanilla and HDP * Imported Translations from Transifex * Fix working Spark with cinder volumes * Fix scaling with Heat and Neutron * Support Cinder API version 2 * Parallel testing EDP jobs * Fix HDFS url description, and other various edits * Fixed cinder check for non-admin user * Support Nova availability zones * Remove line saying that scaling and EDP are not supported for Spark * Description of job config hints in new doc page is wrong * Removing extraneous Swift information from Features * Update the Elastic Data Processing (EDP) documentation page * Add documentation on the EDP job engine SPI * Fixed volumes configuration in spark plugin 2014.2.rc1 ---------- * Add links for Spark images * Use $((EXPRESSION)) instead of $[EXPRESSION] * Open Kilo development * Sahara UI panels configuration docs updated * Updating RDO installation documentation * Update custom hacking checks * Update CONTRIBUTING.rst * Added docs for running Sahara in distributed mode * Removed mentions of Sahara Dashboard * Adding Spark to the list of default plugins * [DOC] Changed feature matrix for Spark * Fixed broken pep8 after keystone update * Adding job execution examples to UI user guide * Updating Hadoop-Swift documentation * Add CDH plugin in plugin availability matrix (userdoc) * Updated from global requirements * Add devref/devstack to docs index * Adding links for Juno Fedora images * [DOC] Removed feature matrix for heat engine * Image building docs updated * Updated REST API documentation * Update links for plugin images * [DOC] Made disk image builder docs more accurate * [DOC] Made EDP requirements plugin specific * [DOC] Switched docs from answers.launchpad.net to ask.o.o * [DOC] Fixed deprecated config style in devstack instruction * Adding missing CDH resources to MANIFEST.in * [Vanilla] Increased security of temporary files for db * Changed hardcoded 'hadoop' hdfs user name to template * Use 'auth\_uri' parameter from config * Changing Hadoop to "Data Processing" * Updating documentation for overview/details * Imported Translations from Transifex * Add pip-missing-reqs tox env * Add genconfig tox env * Fix typo in CDH description * Updated from global requirements * [DOC] Minor change - added missing period * Add entry for Yevgen Runts to avoid dup author * Add entry for Sofiia to avoid dup author * Add entry for Andrey Pavlov to fix author name * Add entry for Kazuki Oikawa to avoid dup authors * [DOC] Removed note about SAHARA\_USE\_NEUTRON in sahara-dashboard * Imported Translations from Transifex * Imported Translations from Transifex * Fixed descriptions for db migrations * Fixed example of hadoop versions return in 
plugin SPI * Removed remaining 'swift-internal' prefix * Add missed translations at service/validations/edp * Remove direct dep on oslo-incubator jsonutils * Sahara-Dashboard docs updated * Imported Translations from Transifex * Refactoring HDP plugins to allow multiple Zookeeper servers * Updated from global requirements * Added information about sahara settings to cluster * Fixed the localrc file for enabling swift services * Fixed terminate\_unneeded\_clusters fail in distributed mode * Default value of 'global/namenode\_opt\_maxnewsize' should be 200m * Adding documentation for proxy domain usage * Removed attempt to ignore tests in pylint * Remove direct dep on oslo-incubator timeutils * Update oslo processutils module * Update oslo lockutils module * Update oslo log module * Update oslo jsonutils module * Sync oslo strutils module * CDH manager-node flavor change * Add use of nova\_kwargs for nova servers create to improve readability * Imported Translations from Transifex * Renamed pylintrc to be found by pylint * Made link to devstack installation internal (instead of external) * Moved validate\_edp from plugin SPI to edp\_engine * Install packages for CDH plugin without their starting * Install non deprecated DB for Cloudera Manager * Added missed translation for service.edp.spark * Adding a periodic task to remove zombie proxy users * Refactoring DataSources to use proxy user * Updating JobBinaries to use proxy for Swift access * Adding trust delegation and removal for proxy users * Adding proxy user creation per job execution * Adding configuration and check for proxy domain * Migrate to oslo.serialization * Renamed missing 'savanna' tags to 'sahara' * Fix cluster creation with heat engine * Update sahara.conf.sample * Imported Translations from Transifex 2014.2.b3 --------- * Imported Translations from Transifex * Fixed typo in integration tests error handling * Add warn re sorting requirements * Add spark to toctree on doc index page * Fix doc issues * Add doc8 tox env * Replaced range with six.moves.range for significant ranges * Removed comment about hashseed reset in unit tests * Allowed to specify IDs for security groups * Switched anti-affinity feature to server groups * Moved get\_oozie\_server from plugin SPI to edp\_engine * Moved URI getters from plugin SPI to edp\_engine * Updated docs with security group management feature * Minor change - removed unnessary parentheses * Added translation for CDH plugin description * [HEAT] Fixed rollback error on failure during scale down * Implemented get\_open\_ports method for vanilla hadoop2 * Added ability to create security group automatically * Catching all connection errors in waiting HDP server * Make starting services in Vanilla 2.4.1 parallel * Add notifications to Sahara * Fix help strings * Updated from global requirements * Waiting connect cloudera agents to cloudera manager * [HDP1.3.2] Fixed bug with decommissioning cluster * Imported Translations from Transifex * Remove host from CDH cluster after decommissioning * Enable swift in IT for CDH by default * Documented heat engine backward compatibility break * Use Vanilla 2 plugin for transient checks * Use auth\_token from keystonemiddleware * Updated from global requirements * Fix updating include files after scaling for vanilla 2 plugin * Add EDP IT after scaling for vanilla 1 plugin * Make Vanilla 2.3.0 plugin deprecated * Imported Translations from Transifex * Adjust RESTAPIs convert-config w/suggests from SL * Removed sqlite from docs * Removed support of 
swift-internal prefix * Removed one round trip to server for HDFS put * Added create\_hdfs\_dir method to oozie edp engine * Made EDP engine plugin specific * Do not rely on hash ordering in tests * Fix some of tests that rely on hash ordering * Fix jsonschema>=2.4.0 message assertion * Fixed wrong use of testtools.ExpectedException * Fix using cinder volumes with nodemanager in HDP2 * Correction of words decoMMiSSion-decoMMiSSioning * Add tests for ops.py * Add Spark integration test * Fix starting instances after scaling for CDH * Improved error handling for provisioning operations * Fix parsing dfsreport for CDH in integration tests * Unit tests for CDH plugin * Imported Translations from Transifex * Updated from global requirements * Create etc/edp-examples directory * Fixed Exception failures caused by i18n * Add translation support to plugin modules * Imported Translations from Transifex * Remove unused parameter from CDH IT * Fix scale up cluster on CDH plugin with vanilla image * Fixed DecommissionError bug * Imported Translations from Transifex * Fixed bug with NotFoundException * Migration to oslo.utils * Imported Translations from Transifex * Fixed concurrent job execution with external hdfs * Update oslo.messaging to alpha/juno version * Update oslo.config to the alpha/juno version * Updated from global requirements * Move middleware package to api package * Imported Translations from Transifex * Removed a duplicate directive * Added ability to specify security group for node group * Fixed cluster rollback on scaling with heat engine * Fix closing HTTP session in Ambari plugin * Add test for storing data in DB for 007 migration * Group tests by class * Imported Translations from Transifex * Fixed a ValueError on provisioning cluster * Adding job execution status constants * Add a Spark job type for EDP * Fix put\_file\_to\_hdfs method in hdfs\_helper * Set python hash seed to 0 in tox.ini * Adding generic trust creation and destruction methods * Add oslo.messaging confs to sample config * Fixed logging about changes of cluster status * Add translation support to service and missed modules * Imported Translations from Transifex * Implement EDP for a Spark standalone cluster * Imported Translations from Transifex * Waiting deleting Heat stack * Integration tests for CDH plugin * Add CDH plugin to Sahara * Add rm from docs env to whitelist to avoid warn * Add translation support to service and utils modules * Migration to oslo.db * Imported Translations from Transifex * Removed extra work in case of no volumes * Add translation support to upper level modules * Adding sanitization for trusts in JobExecution model * Removed code duplication on cluster state change * Mark floating-IP auto-assignment as disabled also with Neutron * Updated from global requirements * Use with\_variant method for dialects db types 2014.2.b2 --------- * Delete migration tests for placeholders * Fixed bug with empty "volumes" when heat engine is used * Add support testing mr job without log checking * Migrate integration tests to oslotest * Append to a remote existing file * Fixed diction: VMWare should be VMware * Imported Translations from Transifex * Fix a auth\_uri cannot get in sahara-engine * Create an option for Spark path * Bump Hadoop to 2.4.1 version * Wrap eventlet's Timeout exception * Imported Translations from Transifex * Add support skipping EDP tests for vanilla 2 plugin * Update oslo-incubator db.sqlalchemy module * Update oslo-incubator threadgroup modules * Update oslo-incubator 
processutils module * Update oslo-incubator periodic\_task module * Update oslo-incubator network\_utils module * Fix creating cluster with Vanilla 2.4.0 plugin * Fixes failure to scale cluster adding new Hive or WebHCat service * Revert "Fix use of novaclient.exceptions.NotFound" * Renamed Pending to PENDING fixes bug 1329526 * Update oslo-incubator loopingcall module * Update oslo-incubator context module * Update oslo-incubator config.generator module * Update oslo-incubator lockutils module * Update oslo-incubator fileutils module * Update oslo-incubator log module * Fix scaling cluster Vanilla for Hadoop 2.3 * Updated from global requirements * Add vanilla plugin with Hadoop 2.4.0 * Fixed configuring instances for Vanilla 2.0 * Fix hardcoded username(ec2-user) for heat-engine * Fixed EDP job execution failure * Fix use of novaclient.exceptions.NotFound * Update oslo-incubator excutils module * Update oslo-incubator jsonutils module * Update oslo-incubator importutils module * Update oslo-incubator strutils module * Update oslo-incubator gettextutils module * Update oslo-incubator timeutils module * Allow plugins to choose the EDP implementation * Refactor the job manager to allow multiple execution engines * Use oslo.i18n * Add oslo.i18n lib to requirements * Update image registry docs to use cli * Imported Translations from Transifex * Remove docutils pin * Fixed hadoop keys generation in case of existing extra * Switched Sahara unit tests base class to oslotest * Update doc for REST endpoint convert-config * Extend status\_description column in Clusters tables * Updated from global requirements * Update docs to reflect the changes in security group section in horizon * Fix formatting in readme for vanilla configs * Added validation check for number of datanodes * Imported Translations from Transifex * Fix tools/get\_auth\_token * Corrected a number of pep8 errors * Changed HDP unit tests base class * Updated from global requirements * Fixed volumes mount in case of existing volumes * Adds DataNode decommissioning support to HDP Plugin * Refactoring vanilla 2 plugin * Fix docs to use sahara-all instead of sahara-api * Use immutable arg rather mutable arg * Upgrades the HDP plugin to use Ambari 1.6.0 * Fix detaching cinder volumes * Updated from global requirements * Upgrades the HDP plug-in to install Hue * Fixed number of hacking errors * Updated from global requirements * Small fixes in README migration file * Imported Translations from Transifex * Implement scaling for Spark clusters * Installation guide updated * Fix Sahara CI links * Fixed H405 pep8 style check * Updated from global requirements * Make deleting transient clusters safe * Fix docs for configuring authentication * Handle remote driver not loaded situation * Migrated integration tests to testtools * Remove vim editor configuration from comments * Fixed indent in testing docs * Updated from global requirements * Imported Translations from Transifex * Fixed E265 pep8 * Removed cluster retrieving in provisioning engine * Added new hacking version to requirements * Updated from global requirements * Hided not found logger messages in unit tests * Migrated unit tests to testtools * Sync up oslo log module * Fixed /etc/hosts update for external hdfs * Fixed status update for job execution * Update job execution status on cluster deletion * Fixed remote call in external HDFS configuration method * Remove usage of remote from HDP Instance constructor 2014.2.b1 --------- * Added jobhistory address config to vanilla 2 * Added 
secondary name node heap size param to vanilla plugin * Minor EDP refactoring * Update documentation for Spark 1.0.0 * Use in-memory sqlite DB for unit tests * Imported Translations from Transifex * Added several checks on deleted cluster to prevent error logs * Changing job excecution status to 'FAILED' in case of exception * Add Spark 1.0.0 to the version list * Rework keystone auth\_token middleware configs * [HDP] Integration tests for HDP 2.0.6 * Add Spark to overview and feature matrix * Documentation for the Spark plugin * Adding disconnected mode fixes to hdp plugin * [HDP] Changed test tag for HDP1 plugin * Made Swift topology optional for data locality * Add warn re alpha readiness of distrib mode * Updated from global requirements * Sync the latest DB code from oslo-incubator * Added ability to run HDFS service only with Hadoop 2 * Removed versions from Vanilla plugin description * Fixed oozie component name in HDP exception * Added validate\_edp method to Plugin SPI doc * Added validation for long hostnames * Add upgrade notes for sahara-api to sahara-all * Updated from global requirements * Replaced RuntimeErrors with specific errors * remove default=None for config options * Removed unused global var and unnessary param * Add Spark plugin to Sahara * Fix intermittent transient cluster tests failure * Synced jsonutils from oslo-incubator * Added validation check that network provided for neutron * Remove unused parameters in integration tests * Remove unused function from xmlutils * Fix typo: Plaform -> Platform * Fix working sahara with heat and nova-network * Removed unneeded check on job type during job execution * Add ".sahara" suffix automatically to swift URLs in workflows * Removed migration-time config folders lookup * Remove all mostly untranslated PO files * Made processes names case sensitive * replaced e.message * Remove monkey\_patch from test\_context * Fix hardcoded tenant name for job binaries * Imported Translations from Transifex * Run periodics in sahara-engine instead of sahara-api * Create trusts for admin user with correct tenant name * Imported Translations from Transifex * Updated from global requirements * Clean up openstack-common.conf * correcting the MANIFEST.in paths * correcting the MANIFEST.in paths * Extended plugin SPI with methods to communicate with EDP * Allow HDFS data source paths without the hdfs:// scheme * Improve validation for swift data source URLs * Imported Translations from Transifex * Updated from global requirements * Replaced the word components with component(s) * Updated from global requirements * Synced jsonutils from oslo-incubator * Split sahara into sahara-api and sahara-engine * [IT] More coverage of EDP in tests * Add sahara-all binary * Imported Translations from Transifex * Fix eventlet monkey patch and threadlocal usage * Change the package name of the example to org.openstack.sahara.examples * Imported Translations from Transifex * Fix running EDP job on transient cluster * Add simple fake plugin for testing * Imported Translations from Transifex * Moved information about processes names to plugins * Updated architecture diagram in docs * Forced lowercase for instance names * Improved validation for data-sources creation * Add upgrade doc stub page * Updated from global requirements * Add secondarynamenode support to vanilla 2 plugin * [IT] More coverage of EDP in tests * Add tenant\_id getting in integration tests * Added support of multi-region environment * [IT] Fixed error when skipping scaling test * Fixed 
validation of novanetwork w/o autoassignment * Avoid deleting transient cluster before job is started * Fixed wrong exceptions use for decommission errors * Implementing constants for the job types used by EDP * Change IRC channel name to #openstack-sahara * Imported Translations from Transifex * Remove IDH plugin from sahara * Fix storing binaries in Swift * Updated hdp\_plugin features to align with current capabilties * Saharaclient must be installed for UI to work in dev environment * Change links to images in Quick Start guide * REST API 1.1 corresponds to Icehouse as well * Updated validation section for Vanilla Plugin * Add \*.log files to gitignore * Fix up DevStack guide * Imported Translations from Transifex * Cleanup of docs for integration tests * Fix up Sahara UI installation guide * Updated from global requirements * Fixed wrong use of SaharaException * Update links for vanilla images in doc * Minor fixes to Sahara UI Installation Guide * Fix big job binary objects in mysql * Doc's update for integration tests * Removed possibility to run job w/o Oozie * Removed impossible branch of 'if' statement * Fix up installation guide * Add a custom filter method to scan wrapped dict results * Check that all po/pot files are valid 2014.1.rc1 ---------- * Add examples of upstream files that we should not change * Updating the setup development environment docs for icehouse * Update EDP requirements for hadoop v2 * Added rackawareness to Hadoop 2 in vanilla plugin * Do not document use\_identity\_api\_v3 in the sample-basic file * Add short info re testing * Reserve 5 migrations for backports * Compact all Icehouse migrations into single one * Added parameters to configure a list of node group processes * Add description to use IDH plugin with requests * Fixed tests failures when SKIP\_ALL\_TESTS\_FOR\_PLUGIN=True * Fix db management: don't autocreate db on start * Updating the vanilla image building docs * Add a page to the developer guide on Alembic migrations * Add a paragraph discouraging modification of upstream files * Open Juno dev * Update REST api docs * Updating dashboard user guide doc for icehouse * [IDH] Integration tests for IDH 3.0.2 * [IDH302] Restoring cluster parameters after scaling * Fix check active nodemanagers for vanilla 2 plugin * Heat docs update * Fix default repo links and tarball links for IDH * Add EDP integration tests for vanilla 2 plugin * Filter 'fields' from JobExecutions returned from REST api * Renamed 'idh' integration tests to 'idh2' * Standardize README header * Fixed wrong attached volume's names via Heat * Some configs updates for vanilla 2 plugin * Remove Mirantis copyright from README * Add EDP support for Vanilla 2 plugin * Add fixed and floating IPs discovery via neutron * Updated from global requirements * Change tag for vanilla integration test to 'vanilla1' * Remove agent remote * Fix parallel running integration tests with vanilla plugins * Fix transient clusters termination * Add note about OS\_TENANT\_\* to integration tests * Add integration tests for vanilla 2 plugin * Validate data sources reference different resources * Add transient tag to transient cluster test * Fix running integration tests by tag * [IDH] Fixed cluster scale down * Filter credentials in jobs returned from REST api * Fixed incorrect use of RuntimeError * Rename missed env variables in oslo code * Move swift configs to core-site.xml * Prepare integration tests for use for hadoop 2 * Imported Translations from Transifex * Updated from global requirements * Added 
missing lib to dev UI installation guide * Added python-pip installation to dev environment instruction * Rename strings in plugins dir * Missed renames in code base * Missed renaming in docs * Integration test for a transient cluster was added * Add Job History Server process to vanilla 2 plugin * Fixup 'savanna' references in run\_tests.sh * Override 'savanna' strings in openstack/common * Miscellaneous renaming string fixes * Change remaining references in the doc subdir * Change savanna references in top level docs * Completely remove etc/savanna dir * Move integration tests to python-saharaclient 0.6.0 * Imported Translations from Transifex * Change remaining savanna namespaces in setup.cfg * Change 'savanna' references in tools * Renaming files with savanna words in its names * Change remaining 'savanna' references in sahara/tests * Change "\_savanna\_" image properties to "\_sahara\_" * Keep python 3.X compatibility for xrange * Rename 'self.savanna' to 'self.sahara' in integration tests * Change the 'savanna-db' scheme to 'internal-db' * Changed Savanna to Sahara in documentation images * Move the savanna subdir to sahara * Replaced or removed Savanna words in comments * Replaced all Savanna words in class names * Renames all doc references from Savanna to Sahara * Update i18n config due to the renaming * Renamed all swift-dependent configs to sahara * [IDH] Initial documentation for IDH plugin * We're now using nove client >= 2.17.0 * [IDH] Fixed history server assignment * Fixed reference errors in docs * Update .gitreview to point on updated repo * Updated from global requirements * Cleanup openstack-common.conf * Updated from global requirements * Update oslo-incubator config module * Update oslo-incubator service module * Fixed typo in rollback function description * Make savanna able to be executed as sahara * Removed log message duplication * Update oslo-incubator context module * Update oslo-incubator processutils module * Update oslo-incubator periodic\_task module * Update oslo-incubator loopingcall module * Update oslo-incubator log module * Update oslo-incubator jsonutils modules * Update oslo-incubator importutils module * Update oslo-incubator excutils module * Update oslo-incubator gettextutils module * Update oslo-incubator common module 2014.1.b3 --------- * Fixed bug with unxpected stack delete * Minimal "lifetime" of transient cluster * Add cluster validation to vanilla 2 plugin * Add scaling support to vanilla 2 plugin * Removed EDP dependency on hive server * Updated from global requirements * Add swift support to vanilla 2 plugin * Use keystone v3 api by default * Add alias 'direct' for savanna/direct engine * Expand cluster-template usage validation message * Make decommissioning timeout configurable * Intial Agent remote implementation * [IDH] Added IDH 3.0.2 support * Updated features comparision heat with direct engine * Add Hadoop 2 vanilla plugin * Added scaling parameters to HDP plugin config * Removed EDP dependency on job\_tracker instance * [IDH] Added ability to support several versions * Fix scale down cluster * Updated from global requirements * Updated from global requirements * Fixed itests to work with new savannaclient * Changed get\_node\_groups to receive only one node process * [IDH] Removed copy-pasted test utility file * Added IDH plugin to savanna config * Replace service-specific exceptions with general (continuation) * Throw exception if get\_instance found several candidates * Added EDP test for HDP plugin * Improve help strings * 
Updated from global requirements * Added networks validation * Updated from global requirements * Replace assertEqual(None, \*) with assertIsNone in tests * Make savanna-db-manage able to discover configs * Filter credentials field in data\_sources returned from REST api * Expand swift data source credential tests * Fix non-deterministic a-a test * Add ability to support several versions vanilla plugin * Cinder test to integration tests was added * Replace service-specific exceptions with general * Speed up of Heat provisioning via Neutron * Hiding neutron Client class * Move client docs to python-savannaclient * Fix running IT for IDH plugin * Expand node-group-template usage validation msg * Auto generate and check config sample * Move REST API docs to separated dir * Standardize config sample locations * Fix how migration's cli register db connection opt * Delete 'links' only if it is present * Shorten swift-internal:// to swift:// * Add run\_test.sh for running tests * Attach volumes in parallel * Keep py3.X compatibility for urllib * Use six.moves cStringIO instead of cStringIO * Fix swift data source credential validation * Don't raise MySQL 2013 'Lost connection' errors * Add integration tests to Intel plugin * Fix cluster scaling in IDH plugin * Enable HDP 2 deployment leveraging HDP plugin * Filter credentials when returning job binaries through REST api * Add support retrying rest call in IDH plugin * Add userdoc install instructions for Fuel * Switch over to oslosphinx * Sort modules in openstack-common.conf * Rename Openstack to OpenStack * Use six.moves.urllib.parse instead of urlparse * Remove extraneous vim configuration comments * Fixed hadoop dir creation during hadoop-swift lib download * [IDH] Fixed cluster start without jobtracker service * Remove all support for "Jar" as a job type (alias for "MapReduce") * Further preparation for transition to guest agent * Add support for dotted job types * Remove compatibility code allowing "args" as dict * Fixed a small typo * Fix imports ordering and separation * Sync with global requirements * Make remote pluggable * Fix typo in savanna/tests * [IDH] Fixed cluster start without jobtracker service * Add utilities for supporting dotted job types * Remove extra Java job type fields from JobExecutions * Modify the REST doc to show a Java job type execution * Update the edp user doc to discuss "edp." configs for Java jobs * Move 'main\_class' and 'java\_opts' into edp.java configs * Default OpenStack auth port was changed * Sync with global-requirements * Refactored unit tests structure * Add integration test for streaming mapreduce * Add validation check for streaming elements on MapReduce without libs * Generate streaming tag in mapreduce job * Extract configs beginning with "edp." 
from job\_configs['configs'] * [DOC] Fixed link to oozie in docs * Add tag generation to mapreduce workflow * Imported Translations from Transifex * Separated "tests for utils" and "utils for tests" in unit tests * Remove kombu from requirements * [Integration tests]Deleted unnecessary underscores * Fixed HDP plugin to support Heat engine * Validation of job execution data should raise InvalidDataException * Update oslo-incubator db.sqlalchemy module * Update oslo-incubator py3kcompat module * Update oslo-incubator middleware.base module * Update oslo-incubator processutils module * Update oslo-incubator service module * Update oslo-incubator threadgroup module * Update oslo-incubator log module * Update oslo-incubator timeutils module * Update oslo-incubator gettextutils module * Small fix in development install guide * Fix nova client initialization arguments * Setup logging for wsgi server * Bump stevedore to >=0.14 * Fixed potential problems with global CONF in unit tests * Enable EDP on private neutron networks * Allow boolean "streaming" in Job JSON * Added more strict check for heat stack statuses * Updated from global requirements * Require "libs" for MapReduce and Java jobs and disallow "mains" * Fixed reading topology file with newline at the end * Fixed potential problems in test\_periodic.py * Add a config flag to disable cluster deletion after integration test * Add an hdfs data source example to the rest doc * Update Ambari Repo location and services refactoring * Fixed HDP plugin to support Heat engine * Updated from global requirements * Fixed typo in unit tests utility method * Removed underscore from valid symbols for names used as hostname * Made general name validation less strict * Add support deprecated db param in savanna-db-manage * Disable autocreating database when start savanna * Update install guide * Make error logging more safe * Added short doc about new Heat engine 2014.1.b2 --------- * Add integration test for Oozie java action * Updated from global requirements * Read Swift credentials from input\_data OR output\_data * Add alembic migration tool to sqlalchemy * Update EDP doc * Imported Translations from Transifex * [IDH] Added config controlling hadoop-swift.jar URL * [Vanilla] Updated docs to point to icehouse images * Change configs["args"] to be a list for Pig jobs * Ignore key/value pairs with empty keys in workflow generation * Add code to configure cluster for external hdfs * Imported Translations from Transifex * Add support for HBase in HDP plugin * Imported Translations from Transifex * Add missed i18n configs to setup.cfg * Enable check of Heat engine for Vanilla and HDP * Enable heat engine to launch cluster without keypair * Fix installation intel plugin * [IDH] Fixed work with cluster configs * Added 'oozie' service support to IDH plugin * Fixed wrong instance name with Heat engine * Added anti-affinity feature to Heat engine * Changed Vanilla plugin to use ports from config * Changed HDP plugin to use ports from config * Add util method to get port from address * Update sample savanna config * [Vanilla] Added unit test on get\_hadoop\_ssh\_keys method * Added cache for image\_username * Fixed cluster template with no nodegroups creation * Extract common part of instances.py and instances\_heat.py * Remove unused node\_group parameter in get\_config\_value * Minor exception text changes * Update oslo-incubator db.sqlalchemy module * Update oslo-incubator db module * Update oslo-incubator py3kcompat module * Update oslo-incubator service 
module * Update oslo-incubator gettextutils module * Update oslo-incubator timeutils module * Add Oozie java action workflows * Eliminate extra newlines in generated workflow.xml * Fix typos in edp integration test utility method name * Fix typo in error message * Fix typo in error message * Update oslo-incubator db module * Update oslo-incubator service module * Fix deleting cinder volumes * Properly catch timeout exception raised in thread * Added unit-tests to Heat engine * Fix mounting cinder volumes * Adding IDH plugin basic implementation * Reset CONF for topology\_helper and services during unit tests * Delete determine\_cluster\_config method from vanilla plugin * Fixed issue with undeleted instances * Integration tests related changes * Do not check the status of a job execution if Oozie id is None * Moved tests for general utils out of vanilla package * Node group handling improved in the db module * Update oslo-incubator processutils module * Update oslo-incubator loopingcall module * Update oslo-incubator periodic\_task module * Update oslo-incubator log module * Update oslo-incubator excutils module * Update oslo-incubator db.sqlalchemy module * Update oslo-incubator timeutils module * Wait for HDFS readiness after datanode services start * Increase timeout for Ambari server setup * Minor refactoring of vanilla create cluster * Fixed reporting about new cluster state * Changing oozie libs setup to manual copy * Removal of AUTHORS file from repo * Change "Jar" job type to "MapReduce" * Template names in integration tests were changed * Add generating new keypair to hadoop user in vanilla plugin * Removed cloud user private key pushing to nodes * Enable data locality for HDP plugin * Integration tests related improvements * Added heat service retrieving from keystone catalog * Fix getting cinder devices in heat * Remove properties from Object classes * Added 'gcc' to requirements in dev instructions * Launch integration tests with testr * Provisioning via Heat * Migrating to testr * Python client docs added * Docs in integration tests were updated * Sync requirements: pin Sphinx to <1.2 * Fix some typos in configs/messages * Integration tests have image related changes * Sync minor updates in oslo * Sync minor updates in oslo.db module * Add py3kcompat utils module * Oslo sync: make wait/stop funs work on all threads * Bump savanna client used for tests to >= 0.4.0 * Make infrastructure engine pluggable * Fixed link to how\_to\_build\_oozie page from index * Added savanna component to devstack installation instruction * Use stevedore for plugins loading * Enable cluster deployment with pre-installed JDK * Remove plugin from service/instances.py * Drop os.common.exceptions * Fixed wrong flavor validation * Use @six.add\_metaclass instead off \_\_metaclass\_\_ * Use six for iter keys/values 2014.1.b1 --------- * Remove missed call get\_plugin\_opts * There is no sense to keep py33 in tox envs * Added Neutron support to integration tests * Added missing default message in InvalidCredentials exception * Improved error handling in vanilla plugin * Fix getting hidden vanilla plugin parameters * Remove unused oslo libs * Remove unused plugins opts support * Fix typo in node group property documentation * Removed usages of uuidutils.generate\_uuid() * Revert "Support building wheels (PEP-427)" * Fixed bug when Oozie heap size is not applied * Add support for sqoop service in HDP plugin * Bump version to 2014.1 * Support building wheels (PEP-427) * Enable EDP with HDP plugin * Hacking 
contains all needed requirements * Replace unicode() with six.text\_type() * Fix auth url in swift * Remove check already released in hacking 0.8.0 * Fix style errors and upgrade hacking * Replace copy-pasted HACKING.rst with link * Upgrade openstack common from oslo-incubator * Convert to modern form of openstack-common.conf * Fixed Integration tests * Added json REST samples for edp * Changed use of images for integration tests * A timing/profiling utility for savanna * Changed use of flavors for Integration tests * Add support for cinder to HDP plugin * update installation guide * update guide document * Added check to string validations to skip non-strings * Add a general requirements section for guest images * Add Oozie building instruction * Set iso8601 logging level to WARN * Enable network operations over neutron private nets * Add a requirements section to the EDP doc * Fix web UI ports bug in vanilla plugin * Sync with global-requirements * Add missing flag to UI docs * Add support for oozie in HDP plugin * Added a check for Oozie configs * Remove duplicate retrieve\_auth\_url * Add support for Hive related services * Integration test for Swift has changes * Make 'ls' check threaded * Include Vanilla Plugin \*.sql files * Changed Integration tests * Docs for integration tests was added * Revert "Add link to centos image" * Add link to centos image * Fixed some warnings during doc building: * Decreasing integration test for cluster configs 0.3 --- * Use release version of python-savannaclient * Added REST API v1.1 section * Include the EDP Technical Considerations page in the EDP page * Add content to userdoc/edp.rst * Fix bug with auth\_token in trusts * Refreshed sample config files * Add lower bound for the six dep * Remove the section label markups for EDP 0.3.rc4 ------- * Use python-savannaclient 0.3.rc4 * Minor docs restructurization * Remove KeypairManager.get monkey patch * Update end time in job execution after job complete * Unconditionally monkey patch nova.keypairs.get * Changing SAVANNA\_URL to use v1.1 of the savanna-api * Integration test improvements * Enhance logging * Remove extra agrument from call of run\_job after cluster start 0.3.rc3 ------- * Use savanna client 0.3-rc3 * Add validations for name fields in all EDP objects * Replace DBError with DeletionFailed for DataSource and Job * Remove the 2.0 related version code * Fix the \_assert\_types test to allow for fields that are enums * Add \_\_init\_\_.py file to enable edp validation tests * Add roles to trusts creating * Fixed issue with wrong validation of jobs creation * First cut at UI documentation * Fix auth url retrieval for identity * Fix lost anti\_affinity field from Cluster Template * Add a page for documentation of the Savanna Python client * Another change to parallelize Vanilla plugin provisioning * Added EDP testing * Remove unused EDP JSON validation schemes to prevent confusion * Need to empty /tmp/\*-env.sh, before appending * config\_helper.py doesn't handle negative int correctly * Added data-locality feature description * Sync openstack common with oslo stable/havana * Move swift client to runtime requirements * Hide savanna-subprocess endpoint from end users * Docs for Cluster statuses * Added rack topology configuration for hadoop cluster * Add new EDP sections to the documentation tree 0.3.rc2 ------- * Configuring state hanging fixed * Fix database model for Job Binary * Bump savanna client version to 0.3-rc2 * Right import of modules in itests was made * Excessive log was deleted 
* Docs updated for image usernames * Close FDs for subprocesses * Follow hacking about import * Adding Denny Zhang to AUTHORS * Delete constant 'GENERAL\_CONFS' from config\_helper.py * Fixed typos in docs * Add back copy-pasted theme for Read The Docs only * Starting Job Execution in separate thread * Update stackforge links to openstack * Fix typos in userdoc * Add missing package dependency for test\_requirements.txt * Fix docs layout for Read The Docs * Fix version generation (pep8) * Update .gitreview file following repository move * Increase timeout for decomission operation * Sync with global requirements * Trusts for longrunning tasks * Allow job binaries to be retrieved from internal swift 0.3.rc1 ------- * Impl multitenancy support * Replace copy-pasted sphinx theme with oslo.sphinx * Improvements of integration tests * Add support for multiple HDP versions * Implement threaded SSH for provisioning and Vanilla plugin * Print request body when log-exchange flag is true * Add admin context for non-request ops * Migration to new integration tests * Added missed default configs for Oozie-4.0.0 * Removing line breaks from default configs 0.3a1 ----- * Add /jobs/config-hints/ REST API call * Add running hdfs operations from plugin specific user * Revert bump of alembic version * Integration test refactoring * Fix submitting hive job * Oozie manager enhancement * Bump oslo.config version to use Havana release * Refactoring job execution flow * Fix Cinder volumes support with xenserver * Doc fix for replacement of Hadoop version in Vanilla plugin * Add default sqlite db to .gitignore * Impl context.to\_dict() * Set default log levels for some third-party libs * Temporarily fixes bug #1223934 * Sync requirements with global requirements * Remove version pbr pins from setup\_requires * Enable swift integration * Edit doc for diskimage-builder * Get ambari mirror with curl instead of wget * Floating ip assignement support * Added validation for 'default\_image\_id' field for cluster create * Fixed wrong usage of SavannaException * Fix exception handling in Savanna subprocessing * Add horizon install instructions for RDO * Add pointer to userdoc from horizon guide * Add userdoc install instructions for RDO * Refactor job manager to isolate explicit references to job type * Fixed rep\_factor calculation in cluster shrink validation * Modify job\_configs fields to hold configs, args, and params * Docs fix for Neutron and Floating IP supprot * Docs fix for scaling * Fix Cluster Template name * Add direct dependency on iso8601 * Fixed output of --version command * Partial implementation for bug 1217983 * Filter out some vendor based input from a template upload * Replacement of Vanilla Hadoop 1.1.2 to Hadoop 1.2.1 * Fix print non unicode symbols in remote exception * Add complete paths in MANIFEST.in * Added job status update and hook for transient cluster shutdown * Configuration token replacement is incorrect for some topologies * Don't use ModelBase.save() inside of transaction * Fix random fails of unit tests * Add "mains" and "libs" fields to JobOrigins * Wrapping ssh calls into subprocesses * Partial resolution to bug 121783 * Fix AUTHORS file * Sync oslo with os/oslo-incubator * Sync requirements with os/requirements * Use setup.py develop for tox install * Update ambari admin credentials for scaling * Fix typo * Update Ambari repo URL for 0.2.2 release * Fix job manager for hive action * Documentation about HDP plugin * Add an ability to configure a job * Fix developer install guide from 
horizon * Add Hive + MySQL configuration * Fix create cluster with cinder * Remove an unncecessary loop from validation code * Get rid of headers in context * Added corrections to the documentation * Use api v1.1 for integration tests * Add hive workflow creator * Move Babel from test to runtime requirements * Remove failing on sqla 0.7.X assert * Make model\_base work with sqla 0.7.X * Sync requirements with global-requirements * Docs update for Neutron support * Added Hive configuration, new nodeprocess - hiveserver * Get rid of pycrypto dep * Fix "Broken Cinder Volume" * Neutron support * Fixed typo in development quickstart guide * Extend JobBinary REST api to allow retrieval of raw data * Added job execution after cluster start and operation for job execution * Oozie + MySQL configuration * Enable the scaling up of nodes in a cluster * Install configs to share/savanna from etc/savanna * Migrate to pbr * First version of job manager * Ensure that translations packaged to tarballs * Add support of periodic tasks for edp needs * Add initial oslo-related strings to the pot * First steps for i18n support * Upgrade oslo and add periodic\_task module * Check for valid flavor on cluster create * Add an API for managing job binaries in the savanna db * Add database support for JobBinary objects * Hadoop-Swift integration jar moved to the CDN * Limit requests version * Added Heap Size provisioning for Oozie * JobOrigin REST and API integration * Integration REST and conductor API * Add comment about keypairs tweak removal * Test added for sqla MutableList * Test added for sqla MutableDict * Remove legacy filtering code from sqla model base * Add test for sqlalchemy JsonEncoded type decorator * Add \_\_author\_\_ attr check * Allow Ambari port to be specified in configuration * Sync OpenStack commons with oslo-incubator * Fix custom hacking check id * Refactoring cinder support * Revert "Refactoring cinder support" * Refactoring cinder support * Migrate to Conductor * Remove timeout for id\_rsa generation * Hadoop test can turn on and turn off * Added cluster deletion during failure * Add database support for the JobOrigin object * Resolved issue with wrong comparison * Sync with global requirements * Raise eventlet to 0.13.0 * Bump hacking to 0.7 * Improve exceptions handling in created threads * Added cluster states transition logging * Add a stub API method for updating a JobOrigin object * Fail tests if cluster in Error state * Oozie bug-fixing * Added conductor API for JobExecution Object * Conductor code fixed and extended tests added * Revert "Conductor objects are re-populated on update" * Several fixes and improvements for conductor * Integration test updating for "HDP" plugin * Add initial version of the REST api for the job origin component * Made Ambari RPM location configurable * Fix test files names * Fix retrieval of updated id * Updated how\_to\_participate doc * Add check for deprecated method assertEquals * Conductor API re-init only objects, not IDs * Allow Ambari users to be specified in configuration * Added conductor API for Job Object * Conductor objects are re-populated on update * Refactoring hdp plugin * Added conductor API for DataSource object * Added first version of model for EDP * Implement to\_dict() method for Resource * Added basic helper for map-reduce actions * Bump version to 0.3 * Conductor impr for tenants and templates * Create DB tables on Savanna start * Implement object classes for Conductor * Unit test for Conductor Manager improved * Refactoring 
remote utils * A Resource implementation for Conductor * Tests module refactoring * Fix docs build * Fix requests version * Unit Tests and fixes for Conductor Manager API * Add check S361 for imports of savanna.db module * Update requirements to the latest versions * Improve coverage calculation * Created savanna-db-manage script for new DB * Added validation checks to HDP plugin * Workflow creator * Conductor methods added * Docs build fixed * Fix foreign keys and table names in new model * Move path manipulations into function * Fix Ganglia service start failure * Fix processing cluster configs in HDP plugin * Fix to convert parsing failure * Port sqlalchemy db models to conductor * Initial part of conductor implementation * Resolves critical issue with oozie service * Ambari install screen after install fix * Fix to OpenStack utils * Add changing owner a private key * Enforce hacking >=0.6.0 * Fix using nova\_info in HDP plugin * Added a first version of REST client for Oozie * Fix bool default values in HDP plugin * Integrate Oozie deployment for Vanilla Plugin * Removed extra from Node Group * Docs fixed for horizon * Allow sqlalchemy 0.8.X * Now swift config is not passed if Swift disable * Fix contributing.rst file * Move requirements files to the common place * Now it is possible create a hadoop config without property filter * Added REST API for job and data source with simple validation * Validate image tags to contain required tags for plugin * Docs improvements * Refactoring db module * Added REST API skeleton for EDP component * Fixes issue with ng names duplicates * Instance remote usage refactoring 0.2.1.rc1 --------- * Image Registry tags validation * Fix delete templates that are in use * Added integration test for cluster scaling * Refactoring unit tests for validation * Fix a bug in integration tests * Use console\_scripts instead of bin * Fix HDP plugin should register service urls * Oslo has been updated * Licence header added to tools/get\_auth\_token.py * Add HDP plugin to default plugins * Allow hacking 0.6.0 and fix errors from new checks * Cluster scaling bug fixing: * Added \_\_init\_\_.py to migration directory * Cluster scaling improvement * Cluster scaling bug fixing * Documents typo fixes * Unit tests for scaling validation * Cluster scaling bug fixing * Skipping non-existing instances while deletion * Cluster scaling: deletion * Status description is set on errors * Fix sqlalchemy CompileError * Add cinder validation * Validation exceptions handling improved * REST API returns traceback fix * Added config tests * Minor addition to installation guide 0.2 --- * Remove autoindex * Fix several sphinx bugs and include autoindex * Added details on registering images * Some more last-minute changes to docs * Details on enabling Anti-Affinity * Add cinder features in documentation * README and docs has been updated * Some minor changes * Docs SPI header fixed * Initial implementation of HDP plugin 0.2.rc2 ------- * Fix author/homepage in setup.py * Fix install guide to fedora and centos * Reworked installation guides * Unit tests for savanna validation * Updated development guidelines * Plugin page is added * Added improvement to code for swift test * Minor changes in documentation * Docs feature page * Docs for Jenkins page updated * Docs fixed for horizon dev istallation * Docs fixed for horizon installation * Docs for Disk Imge Builder fixed * Docs feature page * Docs for Jenkins ci added * Refactoring and changing savanna documentation 0.2.rc1 ------- * 
Validation checks improvements 0.2a2 ----- * Cluster scaling validation added * User's Perspective updated on overview page * AUTHORS file generation fixed * Validation added for missed scale/convert impl * Stubs for plugin and scaling ops added * Added more info into Templates section of UserGuide * Added docs for DiskImageBuilder * Api validator is now passes all api args to validators * Change default port for savanna api to 8386 * Changing default for os\_auth\_host to 127.0.0.1 * Documentation update for REST API * Cosmetic changes in the docs * SPI documentation updated * Add doc about how to write docs * The starting page for User Guide is done * Fixes AA schema defenitions in clusters and cluster templates * Revert Ilya Tyaptin to AUTHORS * Support for 'Co-Authored-By' fixed * Added swift itest and improvements to test code * Req/resp exchange logging is now configurable * Help messages for savanna-api configs improved * Database schema for 0.2 release added * Internal error message fixed * Added plugins overview for Dev Guide * Revert "unit tests for "Implemention manual scaling"" * Support of different content types cleaned * Added plugin configuration checks to validate methods * Add attaching/detaching volume unit tests * Context helper improved, avoid 500 instead of 404 * Improve context.set\_ctx to not fail * Reset context before/after request handling * The 'model\_update' helper has been added * unit tests for "Implemention manual scaling" * Updated quickstart guide * Some logging added to cluster provisioning * REST API validation implementation * Add support attach cinder volume to scale cluster * Added improvements to test code * Python 3 print check added * Updated project docs design * Add description and template id during Cluster creation * Wrote installation guide for Savanna * Added improvements to test for image registry * Updated guide for dev environment * Next gen AA implemented and small cleanup * Savanna Dashboard installation guide updated * Cluster scaling bug fixing * Restructured project documentation * UI dev guide updated * Cluster scaling: validation 0.2a1 ----- * Add attaching and detaching cinder volumes * Anti affinitity group field name fixed in validation schema * Plugin version exists check has been added * Remove dynamic serialization switching * Cluster scaling implementation * Private key for user hadoop in vanilla plugin * Rollback sitepackages fix for tox.ini * Fix version of pyflakes: pyflakes==0.7.2 * Fix pep8 and pycrypto versions, fix tox.ini * Preserve order of plugins taken from config * Added small correction to test code * Make 'Enable Swift' config in plugin priority 1 * Renamed MAPREDUCE service in plugin to MapReduce * Improvements of test for image registry * All validation schemas and functions prepared * Multi-tenancy support has been implemented * Added integration tests for cluster creation * Fix issue w/ setting service urls * Make all cluster-wide configs priority 1 in Vanilla plugin * Posargs has been added to the flake8 command * Type fixed in cinder client * Unnecessary logging removed from nova and cinder clients * Add request/response logging when debug=True * Oslo has been updated to the latest version * Requirements has been updated * ApiValidator tests moved to the right place (utils) * Fix cluster delete when instances are already deleted * The special type implemented for flavors * Fixed min volumes\_size constraint * License hacking tests has been added * The tenant\_id should not be specified in requests * 
NodeGroup creation request body schema validation * Type 'configs' implemented for ApiValidator * More strict images validation * Threading utils implemented * Placeholders for future validators and schemas * Basic schema validation added to images calls * The 'check\_exists' applied to all API calls * Added hadoop testing * If validation is not pass, cluster status is set to Error * Little isue with storage\_path generation fixed * Simple tests for utils/crypto * Place patches test to the right place * Base validation framework implemented * Avoid internal error while quering plugin * NotFoundException implemented * Implement \_map\_to\_user\_inputs helper * Plugins could return required image tags now * Fix minor plugin issue * MANIFEST.in has been added * Added itest and improvements to code of tests * Add info property to the Cluster object * XML coverage report added (cobertura) * Fix storage helper * NodeGroupTemplate conversion method fixed * The plugin's 'convert' method improved * Move base.py w/ unit tests to the tests root * Upgrade migration script to the latest model * Model has been updated * Use userdata instead of files for VM key-pair * Enchancement for instance interop helper * Initial migration script has been upgraded * Added cinder volumes support to vanilla plugin * Replaced all 'General' configs to 'general' * Add cover report to .gitignore * Added integration test for image registry * Heap Size can be applied for Hadoop services now * Defined Priority 1 and cluster configurations for Hadoop services * Applied Swift Integration in Vanilla Plugin: * Vanilla plugin configuration helper fixing: * Vanilla plugin configs are more informative now * Add cinderclient * Some changes were added to savanna-dashboard installation * Added integration crud tests * Update object model to support cinder volumes * Replace dumb reraise with reraise util * Now instances are deleted after rollback cluster creation * Unregister image rest api call has been added * Fix savanna.conf.sample * Move swift helper tests to the right place * Reraise exception about error during the instance creation * User keypair is now optional for cluster creation * Added fast integration test * Cluster creation moved in separate thread * Added first Savanna Controller level validation * Impl bulk ops for instance interop helper * Helper for Swift integration was added * Savanna context now is local to greenthread, not just thread * Add fqdn property to instance object * Reduce number of ssh sessions while scp muliple files * InstanceInteropHelper improvements * Conf samples has been updated * Update database defaults * Implementation of Vanilla Plugin * Print stacktrace if error occured while cluster creation * Improve cluster creation from cluster template * Support cluster creation from cluster template * Cluster templates could be now created using node group templates * REST API / (versions) endpoint has been fixed * Id of the NodeGroup is now hidden * Oslo libs has been updated * Basic impl of 'convert' method * Impl file upload for Savanna REST API utils * pbr updated to the latest version * The use\_floating\_ip flag implemented * Description is now optional in ImageRegistry * Sync tools/\*-requires with openstack/requirements * Apply minidom patch to Python prior to 2.7.3 * Use internal IPs in /etc/hosts * Sample conf fix * Images REST API cleanup * Plugin resource name fixed for REST API calls * Adding Nadya Privalova to AUTHORS * Cleanup tools/\*-requires * ImageRegistry completed * Correct todo 
messages * Fix for Dummy plugin * REST API samples updated * Small code improvements * Core part improvements * Pin pbr to avoid sphinx autodocs issues * Adding lintstack to support pylint testing * Documentation for Hadoop-Swift integration was added * Simple REST API call samples has been added * TemplatesRelation is now NodeGroup-like object * Plugin stub updated to the latest version of configs vision * Improve REST API bindings * instruction for dev env for horizone plugin * Adjust Config class to the docs * Enable all code style tests * Add simple plugin calls and cluster status updates * Small cleanup of db model * The 'model\_save' helper added to the context * Helper for configuration in node group * Cluster security and node placement control * The 'ctx' arg removed from plaggable provisioning * Fix remote util * Fix crypto util * Improve database model * Placeholder for instance creation has been added * Keystone auth middleware configuration fixed * User keypair added to cluster object * Remove unused variable * Patch novaclient to support getting keypairs * Introduce py33 to tox.ini * AUTHORS added to the repo * The .mailmap file updated to fix AUTHORS * Fix nova helpers (remove unneeded headers) * Hostname/username are now available in Instance * Use six to improve python 3 compatibility * Basic instance interop helpers added * Private key has been added to the Cluster object * Initial version of Savanna v0.2 0.1.2 ----- * Pre-release 0.1.2 doc changes * Replaced path to start-all.sh script * New hadoop tests were added * Small docs improvements * Integration tests improvements and fixes * Integration tests for hadoop were added * Removed unused paramter '-force' when formatting NameNode * Updated project documentation * .gitignore updated * Requires updated due to the openstack/requirements * Some improvements to documentation were added * Add changes in horizon docs * Revert "Integration tests for hadoop were added." 
* Integration tests for hadoop were added * cscope.out has been added to .gitignore * Change allow-cluster-ops default from False to True * bump version to 0.1.2 0.1.1 ----- * Pre-release 0.1.1 docs fixes * Cluster status fix when error during vms starting * Unnecessary whitespace has been removed 0.1.1a2 ------- * Patch for minidom's writexml has been added * Positive test for validation has been readded * The is\_node\_template\_associated function added * Added default values for JT, NN, TT, DN processes * NodeTemplate usage check moved to validation 0.1.1a1 ------- * "Last updated" info has been added to generated Sphinx pages * Common version is now used in Sphinx docs * Keystone client creation moved to the setUp() function * oslo has been updated * Adds xml hadoop config generating * Keystone removed from the global variables and added it to the class * time.sleep replaced with eventlet.sleep * Deps cleaned by openstack/requirements * Tenants support implemented for clusters * Using clear\_override in tearDown * docs fixed, tool renamed * Implements integration tests * OpenStack Common has been updated to the latest version * Some large (and slow) validation tests has been splitted to several cases * tools/install\_venv fixed * Index page updated * Validation for required process props added * /etc/hosts generator implemented * Additional info files added to repo * Re-add setuptools-git to setup.py * quickstart has been updated * All tools modev to tox * Validation tests fixed (jsonschema update) * OS Summit session nnouncement has been added * Limit cluster name to 50 characters * bump version to 0.1.1 * info about pypi has been added * Remove an invalid trove classifier * Horizon howto page updated and published 0.1 --- * setup.py has been improved * Some useful links added to README * Note about use\_floating\_ips has been added * Simple quickstart fix 0.1a2 ----- * setuptools-get has been removed from deps * AUTHORS and ChangeLog has been added to .gitignore * VM Image link has been fixed * Small index page improvement * Links to bugs and blueprints has been added * simple tests for cluster validation has been added 0.1a1 ----- * Added instruction how to get Savanna from tarball * sample-conf has been removed from savanna-manage * Added error codes to REST API docs * Trailing whitespaces has been removed from the validation messages * Side effect in SavannaTestCase has been fixed * oslo has been updated * HowToParticipiate page updated * Fixed issue when json's responses contain null values * Introduced new networking option for cluster * Fixed validation errors and wrong response codes * get\_auth\_token is now uses default configs * Added Nova resource checking in cluster creation operation * Implemented Hadoop config provisioning * resources has been added to sdist tarball * Exec permissions added to the savanna-manage command * savanna-manage added to the scripts section of setup.py * sample-conf command added to savanna-manage * Several fixes in tools and docs * Quickstart updated * SavannaTestCase added * Some hacking.py fixes and fixes in validation and cluster\_ops * hacking.py added * Tools has been improved * Service layer validation added * Tenant id is now extracted from headers; eq function added to api Resource class * Added basic validation and error handling * small refactoring - service and storage (dao) layers has been created * savanna-manage has been added; reset-db/gen-templates moved to it * Author email has been fixed * dev-conf is now supported * some 
confs cleanup, pyflakes added to tox * versions added to api, small api improvements * small cleanup * quickstart has been updated * docs has been updated * simple tox.ini has been added * unused config item has been removed * oslo.config is now available in pypi * renaming rollbacked to prevent problems with the old image * conf files moved to the right place * Add .gitreview file * mailing list address has been fixed * Changed pictures in docs according to Savanna name and replaced Horizon pages * Changed docs with replacement of EHO to Savanna * eho -> savanna * .mailmap fixed * .pylintrc improved * oslo conf has been updated * Build docs is now implemented using setup.py * unused arg has been removed * oslo upgraded * sample confs has been improved * logging of defaults generator has been cleaned * plain py logging replaced with oslo log * conf-print has been removed * get\_auth\_token has been fixed * stollen files has been moved to openstack package * tests runner has been fixed * unused configs has been removed * refactoring: eho.server -> eho * unused option dev removed; analyze\_opts.py removed; eho.conf.sample updated * some cleanups, tests fixed * oslo context has been added * oslo-config has been upgraded to the latest version * EHO-Horizon Setup instruction is added * Switched from self-made config to oslo.config * htp site page fixed * Corrected link in how-to-participate * tenant\_id has been removed from tests * Added bullet point for base\_image\_id in Item 4 * Polished Quick Start guide a little * small fix * some mistakes has been fixed * Added 'How to Participate' page to the docs * sources and launchpad links has been added * quickstart link has been added * quickstart has been added * Enhanced get\_auth\_token: It can get credentials and tenant from console It could be launched from any directory, not just project root * Made note in docs that we use flavor name instead of flavor id temporarily * SQLAlchemy version has been specified (>=0.7,<0.8a0) * tenant\_id has been removed from objects * run command added to README * Corrected examples in API docs * custom horizon screenshots has been added to docs * roadmap has been updated * default node\_password has been changed * Corrected API docs * if content type is undefined json should be applied * xml requests deserialization has been disabled * xml requests and responses are now supported * some oslo modules has been added * copyright has been added * cleaned * setup utils is now from oslo-incubator * .mailmap has been added * test job has been disabled * objects has been wrapped and tenants are now passed in urls and validated before app * Inserted {tenant\_id} into urls in API docs * setup.py has been added * "stolen" comment has been added * tenant\_id is now taken from headers * docs has been fixed * restapi doc has been upgraded to fit new tenant\_id style * using specifed tenant\_id * comment about tenant check has been added * apidocs generation has been disabled * docs has been updated * auth token creation helper has been added * unnecessary lambda usage has been removed * tests has been fixed to fit added auth token middleware * wsgi middlewares are now added correctly * test has been improved * missing webob dep has been added * horizon token auth is now used * openstack interop helper has been added * Now we print exceptions with stacktraces into log * bug with eternally stoping cluster in case of stoped vms has been fixed * doc has been fixed * configs has been fixed * stop\_cluster clusterop has been 
mocked for tests * service\_urls has been fixed (dict instead of array of dicts) * using conf files instead of hardcoded values * using conf dicts instead of global statements * REST API has been updated to v0.2 * Fixed pep8 error * Fixed VMs networking * Now we use hostnames for addressing between VMs Fixed network discovery - now we correctly identify public interface Little renaming + spell fixes * Code has been reformatted * Some pylint warns has been fixed * All docs has been ported to sphinx * Fixed pep8 and tests * Working version without coroutines * api methods has been splitted and some warns has been fixed * some warnings has been fixed * vm termination implemented * tests has been fixed * allow cluster ops flag added * warnings has been fixed * nodes are now sorted before assertEquals while creating clusters * api test has been upgraded * logging added * todo added * some fixes, clusterops are now starting using eventlet * warnings has been fixed * Added jinja templates for startup scripts * todos reformatted * Update README.rst * api test has been updated to use new defaults * pep8 has been fixed * many pylint warns has been fixed * pyflakes warnings has been fixed * readme has been updated * pylint and pyflakes static analysis has been added * sample test has been removed * Extracted string constants in cluster\_ops * add tests for delete cluster and node template * clusterops now is pep8 compliant * traceback removed * Working version of cluster deployment * defaults has been updated * test\_api -> test\_api\_v01 * some tests has been added * Cluster statuses has been added * Minor changes * may be we should move configs to the 'configs' sub-object for templates get/list responses * deletions has been added into the rest api * service api improvements (termination, nodes creation, etc) * cascade options has been added * README has been updated * Initial implementation of cluster ops. Not working yet :-) * test\_api has been updated * python style names has been reverted * new defaults is now used * RESET\_DB flag is now supported * args has been updated * new args has been added * example routines has been added for cluster creation * patching all main components * only wsgi mode now used * defaults has been updated * some cli args has been added, logging is now configurable * background execution support has been added * default conf has been cleaned * --with-xunit added to run\_tests * Readme didn't mention that you need to install a couple of dependencies first * nosetests.xml added to .gitignore * simple api test has been added * conf improved * debug=True has been removed from bin/eho-api * \*.db added to .gitignore * Readme updated * tests, coverage added * note about hooks added * incorrect scheduler call has been removed * bin added * Some fixes * Initial implementation of REST API * install\_venv fixed * Initial commit ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/HACKING.rst0000664000175000017500000000241700000000000014662 0ustar00zuulzuul00000000000000Sahara Style Commandments ========================= - Step 1: Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ - Step 2: Read on Sahara Specific Commandments ---------------------------- Commit Messages --------------- Using a common format for commit messages will help keep our git history readable. Follow these guidelines: - [S365] First, provide a brief summary of 50 characters or less. 
  Summaries of greater than 72 characters will be rejected by the gate.
- [S364] The first line of the commit message should provide an accurate
  description of the change, not just a reference to a bug or blueprint.

Imports
-------
- [S366, S367] Organize your imports according to the ``Import order``

Dictionaries/Lists
------------------
- [S360] Ensure default arguments are not mutable.
- [S368] Must use a dict comprehension instead of a dict constructor with a
  sequence of key-value pairs. For more information, please refer to
  http://legacy.python.org/dev/peps/pep-0274/

Logs
----
- [S373] Don't translate logs
- [S374] You used a deprecated log level

Importing json
--------------
- [S375] It is preferable to use ``jsonutils`` from ``oslo_serialization``
  instead of ``json`` for operating with ``json`` objects.

(A short example illustrating S360, S368 and S375 follows the LICENSE text
below.)

sahara-16.0.0/LICENSE

Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by the
copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all other
entities that control, are controlled by, or are under common control with
that entity. For the purposes of this definition, "control" means (i) the
power, direct or indirect, to cause the direction or management of such
entity, whether by contract or otherwise, or (ii) ownership of fifty percent
(50%) or more of the outstanding shares, or (iii) beneficial ownership of
such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation source, and
configuration files.

"Object" form shall mean any form resulting from mechanical transformation
or translation of a Source form, including but not limited to compiled
object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form,
made available under the License, as indicated by a copyright notice that is
included in or attached to the work (an example is provided in the Appendix
below).

"Derivative Works" shall mean any work, whether in Source or Object form,
that is based on (or derived from) the Work and for which the editorial
revisions, annotations, elaborations, or other modifications represent, as a
whole, an original work of authorship. For the purposes of this License,
Derivative Works shall not include works that remain separable from, or
merely link (or bind by name) to the interfaces of, the Work and Derivative
Works thereof.

"Contribution" shall mean any work of authorship, including the original
version of the Work and any modifications or additions to that Work or
Derivative Works thereof, that is intentionally submitted to Licensor for
inclusion in the Work by the copyright owner or by an individual or Legal
Entity authorized to submit on behalf of the copyright owner.
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.789891 sahara-16.0.0/PKG-INFO0000664000175000017500000000411000000000000014151 0ustar00zuulzuul00000000000000Metadata-Version: 1.2 Name: sahara Version: 16.0.0 Summary: Sahara project Home-page: https://docs.openstack.org/sahara/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: Apache Software License Description: ======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/sahara.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on OpenStack Data Processing ("Sahara") project ============================================ Sahara at wiki.openstack.org: https://wiki.openstack.org/wiki/Sahara Storyboard project: https://storyboard.openstack.org/#!/project/935 Sahara docs site: https://docs.openstack.org/sahara/latest/ Roadmap: https://wiki.openstack.org/wiki/Sahara/Roadmap Quickstart guide: https://docs.openstack.org/sahara/latest/user/quickstart.html How to participate: https://docs.openstack.org/sahara/latest/contributor/how-to-participate.html Source: https://opendev.org/openstack/sahara Bugs and feature requests: https://storyboard.openstack.org/#!/project/935 Release notes: https://docs.openstack.org/releasenotes/sahara/ License ------- Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 Platform: UNKNOWN Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Requires-Python: >=3.6 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/README.rst0000664000175000017500000000203600000000000014550 0ustar00zuulzuul00000000000000======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/sahara.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. 
Change things from this point on OpenStack Data Processing ("Sahara") project ============================================ Sahara at wiki.openstack.org: https://wiki.openstack.org/wiki/Sahara Storyboard project: https://storyboard.openstack.org/#!/project/935 Sahara docs site: https://docs.openstack.org/sahara/latest/ Roadmap: https://wiki.openstack.org/wiki/Sahara/Roadmap Quickstart guide: https://docs.openstack.org/sahara/latest/user/quickstart.html How to participate: https://docs.openstack.org/sahara/latest/contributor/how-to-participate.html Source: https://opendev.org/openstack/sahara Bugs and feature requests: https://storyboard.openstack.org/#!/project/935 Release notes: https://docs.openstack.org/releasenotes/sahara/ License ------- Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.585891 sahara-16.0.0/api-ref/0000775000175000017500000000000000000000000014403 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.609891 sahara-16.0.0/api-ref/source/0000775000175000017500000000000000000000000015703 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/conf.py0000664000175000017500000001472400000000000017212 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # sahara documentation build configuration file, created Fri May 6 15:19:20 # 2016. # # This file is execfile()d with the current directory set to # its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys extensions = [ 'os_api_ref', 'openstackdocstheme' ] # openstackdocstheme options repository_name = 'openstack/sahara' use_storyboard = True html_theme = 'openstackdocs' html_theme_options = { "sidebar_dropdown": "api_ref", "sidebar_mode": "toc", } # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # # source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. 
copyright = '2010-present, OpenStack Foundation' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # The reST default role (used for this markup: `text`) to use # for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for man page output ---------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', 'title', 'Authors name', 'manual' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_use_modindex = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = '' # Output file base name for HTML help builder. 
htmlhelp_basename = 'saharaoc' # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). # latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). # latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Sahara.tex', 'OpenStack Data Processing API Documentation', 'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # Additional stuff for the LaTeX preamble. # latex_preamble = '' # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_use_modindex = True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/index.rst0000664000175000017500000000027100000000000017544 0ustar00zuulzuul00000000000000=================== Data Processing API =================== Contents: API content can be searched using the :ref:`search`. .. toctree:: :maxdepth: 2 v1.1/index v2/index ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641486.6138911 sahara-16.0.0/api-ref/source/v1.1/0000775000175000017500000000000000000000000016370 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/cluster-templates.inc0000664000175000017500000001202200000000000022535 0ustar00zuulzuul00000000000000.. -*- rst -*- ================= Cluster templates ================= A cluster template configures a Hadoop cluster. A cluster template lists node groups with the number of instances in each group. You can also define cluster-scoped configurations in a cluster template. Show cluster template details ============================= .. rest_method:: GET /v1.1/{project_id}/cluster-templates/{cluster_template_id} Shows details for a cluster template. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - cluster_template_id: url_cluster_template_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: cluster_template_description - use_autoconfig: use_autoconfig - cluster_configs: cluster_configs - created_at: created_at - default_image_id: default_image_id - updated_at: updated_at - plugin_name: plugin_name - is_default: is_default - is_protected: object_is_protected - shares: object_shares - domain_name: domain_name - tenant_id: tenant_id - node_groups: node_groups - is_public: object_is_public - hadoop_version: hadoop_version - id: cluster_template_id - name: cluster_template_name Response Example ---------------- .. literalinclude:: samples/cluster-templates/cluster-templates-list-response.json :language: javascript Update cluster templates ======================== .. rest_method:: PUT /v1.1/{project_id}/cluster-templates/{cluster_template_id} Updates a cluster template. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - cluster_template_id: cluster_template_id Request Example --------------- .. 
literalinclude:: samples/cluster-templates/cluster-template-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: cluster_template_description - use_autoconfig: use_autoconfig - cluster_configs: cluster_configs - created_at: created_at - default_image_id: default_image_id - updated_at: updated_at - plugin_name: plugin_name - is_default: is_default - is_protected: object_is_protected - shares: object_shares - domain_name: domain_name - tenant_id: tenant_id - node_groups: node_groups - is_public: object_is_public - hadoop_version: hadoop_version - id: cluster_template_id - name: cluster_template_name Delete cluster template ======================= .. rest_method:: DELETE /v1.1/{project_id}/cluster-templates/{cluster_template_id} Deletes a cluster template. Normal response codes:204 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - cluster_template_id: cluster_template_id List cluster templates ====================== .. rest_method:: GET /v1.1/{project_id}/cluster-templates Lists available cluster templates. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - limit: limit - marker: marker - sort_by: sort_by_cluster_templates Response Parameters ------------------- .. rest_parameters:: parameters.yaml - markers: markers - prev: prev - next: next - description: cluster_template_description - use_autoconfig: use_autoconfig - cluster_configs: cluster_configs - created_at: created_at - default_image_id: default_image_id - updated_at: updated_at - plugin_name: plugin_name - is_default: is_default - is_protected: object_is_protected - shares: object_shares - domain_name: domain_name - tenant_id: tenant_id - node_groups: node_groups - is_public: object_is_public - hadoop_version: hadoop_version - id: cluster_template_id - name: cluster_template_name Response Example ---------------- .. rest_method:: GET /v1.1/{project_id}/cluster-templates?limit=2 .. literalinclude:: samples/cluster-templates/cluster-templates-list-response.json :language: javascript Create cluster templates ======================== .. rest_method:: POST /v1.1/{project_id}/cluster-templates Creates a cluster template. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - project_id: project_id Request Example --------------- .. literalinclude:: samples/cluster-templates/cluster-template-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: cluster_template_description - use_autoconfig: use_autoconfig - cluster_configs: cluster_configs - created_at: created_at - default_image_id: default_image_id - updated_at: updated_at - plugin_name: plugin_name - is_default: is_default - is_protected: object_is_protected - shares: object_shares - domain_name: domain_name - tenant_id: tenant_id - node_groups: node_groups - is_public: object_is_public - hadoop_version: hadoop_version - id: cluster_template_id - name: cluster_template_name ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/clusters.inc0000664000175000017500000001303000000000000020724 0ustar00zuulzuul00000000000000.. -*- rst -*- ======== Clusters ======== A cluster is a group of nodes with the same configuration. List available clusters ======================= .. 
rest_method:: GET /v1.1/{project_id}/clusters Lists available clusters. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - limit: limit - marker: marker - sort_by: sort_by_clusters Response Parameters ------------------- .. rest_parameters:: parameters.yaml - markers: markers - prev: prev - next: next - count: count - info: info - cluster_template_id: cluster_template_id - is_transient: is_transient - provision_progress: provision_progress - status: status - neutron_management_network: neutron_management_network - clusters: clusters - management_public_key: management_public_key - status_description: status_description - trust_id: trust_id - domain_name: domain_name Response Example ---------------- .. rest_method:: GET /v1.1/{project_id}/clusters .. literalinclude:: samples/clusters/clusters-list-response.json :language: javascript Create cluster ============== .. rest_method:: POST /v1.1/{project_id}/clusters Creates a cluster. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id Request Example --------------- .. literalinclude:: samples/clusters/cluster-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - count: count - info: info - cluster_template_id: cluster_template_id - is_transient: is_transient - provision_progress: provision_progress - status: status - neutron_management_network: neutron_management_network - management_public_key: management_public_key - status_description: status_description - trust_id: trust_id - domain_name: domain_name Create multiple clusters ======================== .. rest_method:: POST /v1.1/{project_id}/clusters/multiple Creates multiple clusters. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id Request Example --------------- .. literalinclude:: samples/clusters/multiple-clusters-create-request.json :language: javascript Show details of a cluster ========================= .. rest_method:: GET /v1.1/{project_id}/clusters/{cluster_id} Shows details for a cluster, by ID. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - cluster_id: url_cluster_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - count: count - info: info - cluster_template_id: cluster_template_id - is_transient: is_transient - provision_progress: provision_progress - status: status - neutron_management_network: neutron_management_network - management_public_key: management_public_key - status_description: status_description - trust_id: trust_id - domain_name: domain_name Response Example ---------------- .. literalinclude:: samples/clusters/cluster-show-response.json :language: javascript Delete a cluster ================ .. rest_method:: DELETE /v1.1/{project_id}/clusters/{cluster_id} Deletes a cluster. Normal response codes:204 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - cluster_id: url_cluster_id Scale cluster ============= .. rest_method:: PUT /v1.1/{project_id}/clusters/{cluster_id} Scales a cluster. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - cluster_id: cluster_id Request Example --------------- .. literalinclude:: samples/clusters/cluster-scale-request.json :language: javascript Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - count: count - info: info - cluster_template_id: cluster_template_id - is_transient: is_transient - provision_progress: provision_progress - status: status - neutron_management_network: neutron_management_network - management_public_key: management_public_key - status_description: status_description - trust_id: trust_id - domain_name: domain_name Update cluster ============== .. rest_method:: PATCH /v1.1/{project_id}/clusters/{cluster_id} Updates a cluster. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - cluster_id: url_cluster_id Request Example --------------- .. literalinclude:: samples/clusters/cluster-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - count: count - info: info - cluster_template_id: cluster_template_id - is_transient: is_transient - provision_progress: provision_progress - status: status - neutron_management_network: neutron_management_network - management_public_key: management_public_key - status_description: status_description - trust_id: trust_id - domain_name: domain_name Show progress ============= .. rest_method:: GET /v1.1/{project_id}/clusters/{cluster_id} Shows provisioning progress for a cluster. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - cluster_id: url_cluster_id Response Example ---------------- .. literalinclude:: samples/event-log/cluster-progress-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/data-sources.inc0000664000175000017500000000660600000000000021465 0ustar00zuulzuul00000000000000.. -*- rst -*- ============ Data sources ============ A data source object defines the location of input or output for MapReduce jobs and might reference different types of storage. The Data Processing service does not validate data source locations. Show data source details ======================== .. rest_method:: GET /v1.1/{project_id}/data-sources/{data_source_id} Shows details for a data source. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - data_source_id: url_data_source_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: data_source_description - url: url - tenant_id: tenant_id - created_at: created_at - updated_at: updated_at - is_protected: object_is_protected - is_public: object_is_public - type: type - id: data_source_id - name: data_source_name Response Example ---------------- .. literalinclude:: samples/data-sources/data-source-show-response.json :language: javascript Delete data source ================== .. rest_method:: DELETE /v1.1/{project_id}/data-sources/{data_source_id} Deletes a data source. Normal response codes:204 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - data_source_id: url_data_source_id Update data source ================== .. rest_method:: PUT /v1.1/{project_id}/data-sources/{data_source_id} Updates a data source. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - data_source_id: url_data_source_id Request Example --------------- .. literalinclude:: samples/data-sources/data-source-update-request.json :language: javascript List data sources ================= .. 
rest_method:: GET /v1.1/{project_id}/data-sources Lists all data sources. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - limit: limit - marker: marker - sort_by: sort_by_data_sources Response Parameters ------------------- .. rest_parameters:: parameters.yaml - markers: markers - prev: prev - next: next - description: data_source_description - url: url - tenant_id: tenant_id - created_at: created_at - updated_at: updated_at - is_protected: object_is_protected - is_public: object_is_public - type: type - id: data_source_id - name: data_source_name Response Example ---------------- .. rest_method:: GET /v1.1/{project_id}/data-sourses?sort_by=-name .. literalinclude:: samples/data-sources/data-sources-list-response.json :language: javascript Create data source ================== .. rest_method:: POST /v1.1/{project_id}/data-sources Creates a data source. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id Request Example --------------- .. literalinclude:: samples/data-sources/data-source-register-hdfs-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: data_source_description - url: url - tenant_id: tenant_id - created_at: created_at - updated_at: updated_at - is_protected: object_is_protected - is_public: object_is_public - type: type - id: data_source_id - name: data_source_name ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/event-log.inc0000664000175000017500000000116500000000000020766 0ustar00zuulzuul00000000000000.. -*- rst -*- ========= Event log ========= The event log feature provides information about cluster provisioning. In the event of errors, the event log shows the reason for the failure. Show progress ============= .. rest_method:: GET /v1.1/{project_id}/clusters/{cluster_id} Shows provisioning progress of cluster. Normal response codes: 200 Error response codes: Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - cluster_id: cluster_id Response Example ---------------- .. literalinclude:: samples/event-log/cluster-progress-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/image-registry.inc0000664000175000017500000000753400000000000022024 0ustar00zuulzuul00000000000000.. -*- rst -*- ============== Image registry ============== Use the image registry tool to manage images, add tags to and remove tags from images, and define the user name for an instance operating system. Each plugin lists required tags for an image. To run remote operations, the Data Processing service requires a user name with which to log in to the operating system for an instance. Add tags to image ================= .. rest_method:: POST /v1.1/{project_id}/images/{image_id}/tag Adds tags to an image. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - tags: tags - image_id: url_image_id Request Example --------------- .. literalinclude:: samples/image-registry/image-tags-add-request.json :language: javascript Show image details ================== .. rest_method:: GET /v1.1/{project_id}/images/{image_id} Shows details for an image. Normal response codes: 200 Request ------- .. 
rest_parameters:: parameters.yaml - project_id: url_project_id - image_id: url_image_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status - username: username - updated: updated - description: image_description - created: created - image: image - tags: tags - minDisk: minDisk - name: image_name - progress: progress - minRam: minRam - id: image_id - metadata: metadata Response Example ---------------- .. literalinclude:: samples/image-registry/image-show-response.json :language: javascript Register image ============== .. rest_method:: POST /v1.1/{project_id}/images/{image_id} Registers an image in the registry. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - username: username - description: image_description - image_id: url_image_id Request Example --------------- .. literalinclude:: samples/image-registry/image-register-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status - username: username - updated: updated - description: image_description - created: created - image: image - tags: tags - minDisk: minDisk - name: image_name - progress: progress - minRam: minRam - id: image_id - metadata: metadata Unregister image ================ .. rest_method:: DELETE /v1.1/{project_id}/images/{image_id} Removes an image from the registry. Normal response codes:204 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - image_id: url_image_id Remove tags from image ====================== .. rest_method:: POST /v1.1/{project_id}/images/{image_id}/untag Removes tags from an image. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - tags: tags - image_id: url_image_id Request Example --------------- .. literalinclude:: samples/image-registry/image-tags-delete-request.json :language: javascript List images =========== .. rest_method:: GET /v1.1/{project_id}/images Lists all images registered in the registry. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - tags: tags Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status - username: username - updated: updated - description: image_description - created: created - image: image - tags: tags - minDisk: minDisk - name: image_name - images: images - progress: progress - minRam: minRam - id: image_id - metadata: metadata Response Example ---------------- .. literalinclude:: samples/image-registry/images-list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/index.rst0000664000175000017500000000073200000000000020233 0ustar00zuulzuul00000000000000:tocdepth: 3 ------------------------ Data Processing API v1.1 ------------------------ .. rest_expand_all:: .. include:: cluster-templates.inc .. include:: clusters.inc .. include:: data-sources.inc .. include:: event-log.inc .. include:: image-registry.inc .. include:: job-binaries.inc .. include:: job-executions.inc .. include:: job-types.inc .. include:: job-binary-internals.inc .. include:: jobs.inc .. include:: node-group-templates.inc .. 
include:: plugins.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/job-binaries.inc0000664000175000017500000001031700000000000021431 0ustar00zuulzuul00000000000000.. -*- rst -*- ============ Job binaries ============ Job binary objects represent data processing applications and libraries that are stored in either the internal database or the Object Storage service. List job binaries ================= .. rest_method:: GET /v1.1/{project_id}/job-binaries Lists the available job binaries. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - limit: limit - marker: marker - sort_by: sort_by_job_binary Response Parameters ------------------- .. rest_parameters:: parameters.yaml - markers: markers - prev: prev - next: next - description: job_binary_description - url: url - tenant_id: tenant_id - created_at: created_at - updated_at: updated_at - is_protected: object_is_protected - is_public: object_is_public - binaries: binaries - id: job_binary_id - name: job_binary_name Response Example ---------------- .. rest_method:: GET /v1.1/{project_id}/job-binaries?sort_by=created_at .. literalinclude:: samples/job-binaries/list-response.json :language: javascript Create job binary ================= .. rest_method:: POST /v1.1/{project_id}/job-binaries Creates a job binary. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id Request Example --------------- .. literalinclude:: samples/job-binaries/create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: job_binary_description - url: url - tenant_id: tenant_id - created_at: created_at - updated_at: updated_at - is_protected: object_is_protected - is_public: object_is_public - id: job_binary_id - name: job_binary_name Show job binary details ======================= .. rest_method:: GET /v1.1/{project_id}/job-binaries/{job_binary_id} Shows details for a job binary. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - job_binary_id: url_job_binary_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: job_binary_description - url: url - tenant_id: tenant_id - created_at: created_at - updated_at: updated_at - is_protected: object_is_protected - is_public: object_is_public - id: job_binary_id - name: job_binary_name Response Example ---------------- .. literalinclude:: samples/job-binaries/show-response.json :language: javascript Delete job binary ================= .. rest_method:: DELETE /v1.1/{project_id}/job-binaries/{job_binary_id} Deletes a job binary. Normal response codes:204 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - job_binary_id: url_job_binary_id Update job binary ================= .. rest_method:: PUT /v1.1/{project_id}/job-binaries/{job_binary_id} Updates a job binary. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - job_binary_id: url_job_binary_id Request Example --------------- .. literalinclude:: samples/job-binaries/update-request.json :language: javascript Show job binary data ==================== .. rest_method:: GET /v1.1/{project_id}/job-binaries/{job_binary_id}/data Shows data for a job binary. 
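As an illustration only (not part of this reference), the raw data can be fetched with any HTTP client once you have a token; the sketch below uses the Python ``requests`` library, and the endpoint URL, project ID, job binary ID, and token are hypothetical placeholder values. .. code-block:: python

    import requests

    # Hypothetical placeholder values -- substitute real ones from your deployment.
    SAHARA_ENDPOINT = "http://sahara.example.com:8386"
    PROJECT_ID = "your-project-id"
    JOB_BINARY_ID = "your-job-binary-id"
    TOKEN = "your-keystone-token"

    # GET /v1.1/{project_id}/job-binaries/{job_binary_id}/data
    resp = requests.get(
        "{}/v1.1/{}/job-binaries/{}/data".format(
            SAHARA_ENDPOINT, PROJECT_ID, JOB_BINARY_ID),
        headers={"X-Auth-Token": TOKEN},
    )
    resp.raise_for_status()
    print(resp.headers["Content-Length"])  # data length, as in the example response below
    print(resp.text)                       # the raw job binary contents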
The response body shows the job binary raw data and the response headers show the data length. Example response: :: HTTP/1.1 200 OK Connection: keep-alive Content-Length: 161 Content-Type: text/html; charset=utf-8 Date: Sat, 28 Mar 2016 02:42:48 GMT A = load '$INPUT' using PigStorage(':') as (fruit: chararray); B = foreach A generate com.hadoopbook.pig.Trim(fruit); store B into '$OUTPUT' USING PigStorage(); Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - job_binary_id: url_job_binary_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - Content-Length: Content-Length Response Example ---------------- .. literalinclude:: samples/job-binaries/show-data-response :language: text ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/job-binary-internals.inc0000664000175000017500000001126400000000000023120 0ustar00zuulzuul00000000000000.. -*- rst -*- ==================== Job binary internals ==================== Job binary internal objects represent data processing applications and libraries that are stored in the internal database. Create job binary internal ========================== .. rest_method:: PUT /v1.1/{project_id}/job-binary-internals/{name} Creates a job binary internal. Job binary internals are objects that represent data processing applications and libraries that are stored in the internal database. Specify the file contents (raw data or script text) in the request body. Specify the file name in the URI. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - name: url_job_binary_internals_name Response Parameters ------------------- .. rest_parameters:: parameters.yaml - name: job_binary_internals_name - tenant_id: tenant_id - created_at: created_at - updated_at: updated_at - is_protected: object_is_protected - is_public: object_is_public - datasize: datasize - id: job_binary_internals_id Show job binary internal data ============================= .. rest_method:: GET /v1.1/{project_id}/job-binary-internals/{job_binary_internals_id}/data Shows data for a job binary internal. The response body shows the job binary raw data and the response headers show the data length. Example response: :: HTTP/1.1 200 OK Connection: keep-alive Content-Length: 161 Content-Type: text/html; charset=utf-8 Date: Sat, 28 Mar 2016 02:21:13 GMT A = load '$INPUT' using PigStorage(':') as (fruit: chararray); B = foreach A generate com.hadoopbook.pig.Trim(fruit); store B into '$OUTPUT' USING PigStorage(); Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - job_binary_internals_id: url_job_binary_internals_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - Content-Length: Content-Length Response Example ---------------- .. literalinclude:: samples/job-binary-internals/show-data-response :language: text Show job binary internal details ================================ .. rest_method:: GET /v1.1/{project_id}/job-binary-internals/{job_binary_internals_id} Shows details for a job binary internal. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - job_binary_internals_id: url_job_binary_internals_id Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - name: job_binary_internals_name - tenant_id: tenant_id - created_at: created_at - updated_at: updated_at - is_protected: object_is_protected - is_public: object_is_public - datasize: datasize - id: job_binary_internals_id Response Example ---------------- .. literalinclude:: samples/job-binary-internals/show-response.json :language: javascript Delete job binary internal ========================== .. rest_method:: DELETE /v1.1/{project_id}/job-binary-internals/{job_binary_internals_id} Deletes a job binary internal. Normal response codes: 204 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - job_binary_internals_id: url_job_binary_internals_id Update job binary internal ========================== .. rest_method:: PATCH /v1.1/{project_id}/job-binary-internals/{job_binary_internals_id} Updates a job binary internal. Normal response codes: 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - job_binary_internals_id: url_job_binary_internals_id Request Example --------------- .. literalinclude:: samples/job-binary-internals/update-request.json :language: javascript List job binary internals ========================= .. rest_method:: GET /v1.1/{project_id}/job-binary-internals Lists the available job binary internals. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - limit: limit - marker: marker - sort_by: sort_by_job_binary_internals Response Parameters ------------------- .. rest_parameters:: parameters.yaml - markers: markers - prev: prev - next: next - binaries: binaries - name: job_binary_internals_name - tenant_id: tenant_id - created_at: created_at - updated_at: updated_at - is_protected: object_is_protected - is_public: object_is_public - datasize: datasize - id: job_binary_internals_id Response Example ---------------- .. rest_method:: GET /v1.1/{project_id}/job-binary-internals .. literalinclude:: samples/job-binary-internals/list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/job-executions.inc0000664000175000017500000001376400000000000022030 0ustar00zuulzuul00000000000000.. -*- rst -*- ============== Job executions ============== A job execution object represents a Hadoop job that runs on a cluster. A job execution polls the status of a running job and reports it to the user. A user can also cancel a running job. Refresh job execution status ============================ .. rest_method:: GET /v1.1/{project_id}/job-executions/{job_execution_id}/refresh-status Refreshes the status of and shows information for a job execution. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - job_execution_id: url_job_execution_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - info: info - output_id: output_id - start_time: start_time - job_id: job_id - updated_at: updated_at - tenant_id: tenant_id - created_at: created_at - args: args - data_source_urls: data_source_urls - return_code: return_code - oozie_job_id: oozie_job_id - is_protected: is_protected_3 - cluster_id: cluster_id - end_time: end_time - params: params - is_public: job_execution_is_public - input_id: input_id - configs: configs - job_execution: job_execution - id: job_execution_id Response Example ---------------- ..
literalinclude:: samples/job-executions/job-ex-response.json :language: javascript List job executions =================== .. rest_method:: GET /v1.1/{project_id}/job-executions Lists available job executions. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - limit: limit - marker: marker - sort_by: sort_by_job_execution Response Parameters ------------------- .. rest_parameters:: parameters.yaml - markers: markers - prev: prev - next: next - info: info - output_id: output_id - start_time: start_time - job_id: job_id - updated_at: updated_at - tenant_id: tenant_id - created_at: created_at - args: args - data_source_urls: data_source_urls - return_code: return_code - oozie_job_id: oozie_job_id - is_protected: is_protected_3 - cluster_id: cluster_id - end_time: end_time - params: params - is_public: job_execution_is_public - input_id: input_id - configs: configs - job_execution: job_execution - id: job_execution_id - job_executions: job_executions Response Example ---------------- .. rest_method:: /v1.1/{project_id}/job-executions .. literalinclude:: samples/job-executions/list-response.json :language: javascript Show job execution details ========================== .. rest_method:: GET /v1.1/{project_id}/job-executions/{job_execution_id} Shows details for a job execution, by ID. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - job_execution_id: url_job_execution_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - info: info - output_id: output_id - start_time: start_time - job_id: job_id - updated_at: updated_at - tenant_id: tenant_id - created_at: created_at - args: args - data_source_urls: data_source_urls - return_code: return_code - oozie_job_id: oozie_job_id - is_protected: is_protected_3 - cluster_id: cluster_id - end_time: end_time - params: params - is_public: job_execution_is_public - input_id: input_id - configs: configs - job_execution: job_execution - id: job_execution_id Response Example ---------------- .. literalinclude:: samples/job-executions/job-ex-response.json :language: javascript Delete job execution ==================== .. rest_method:: DELETE /v1.1/{project_id}/job-executions/{job_execution_id} Deletes a job execution. Normal response codes:204 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - job_execution_id: url_job_execution_id Update job execution ==================== .. rest_method:: PATCH /v1.1/{project_id}/job-executions/{job_execution_id} Updates a job execution. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - job_execution_id: url_job_execution_id Request Example --------------- .. literalinclude:: samples/job-executions/job-ex-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - info: info - output_id: output_id - start_time: start_time - job_id: job_id - updated_at: updated_at - tenant_id: tenant_id - created_at: created_at - args: args - data_source_urls: data_source_urls - return_code: return_code - oozie_job_id: oozie_job_id - is_protected: is_protected_3 - cluster_id: cluster_id - end_time: end_time - params: params - is_public: job_execution_is_public - input_id: input_id - configs: configs - job_execution: job_execution - id: job_execution_id Cancel job execution ==================== .. 
rest_method:: GET /v1.1/{project_id}/job-executions/{job_execution_id}/cancel Cancels a job execution. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - job_execution_id: url_job_execution_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - info: info - output_id: output_id - start_time: start_time - job_id: job_id - updated_at: updated_at - tenant_id: tenant_id - created_at: created_at - args: args - data_source_urls: data_source_urls - return_code: return_code - oozie_job_id: oozie_job_id - is_protected: is_protected_3 - cluster_id: cluster_id - end_time: end_time - params: params - is_public: job_execution_is_public - input_id: input_id - configs: configs - job_execution: job_execution - id: job_execution_id Response Example ---------------- .. literalinclude:: samples/job-executions/cancel-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/job-types.inc0000664000175000017500000000207000000000000020776 0ustar00zuulzuul00000000000000.. -*- rst -*- ========= Job types ========= Each plugin that supports EDP also supports specific job types. Different versions of a plugin might actually support different job types. Configuration options vary by plugin, version, and job type. The job types provide information about which plugins support which job types and how to configure the job types. List job types ============== .. rest_method:: GET /v1.1/{project_id}/job-types Lists all job types. You can use query parameters to filter the response. Normal response codes: 200 Error response codes: Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - plugin: plugin - version: version - type: type - hints: hints Response Parameters ------------------- .. rest_parameters:: parameters.yaml - versions: versions - title: title - description: description_plugin - job_types: job_types - name: plugin_name Response Example ---------------- .. literalinclude:: samples/job-types/job-types-list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/jobs.inc0000664000175000017500000000755000000000000020027 0ustar00zuulzuul00000000000000.. -*- rst -*- ==== Jobs ==== A job object lists the binaries that a job needs to run. To run a job, you must specify data sources and job parameters. You can run a job on an existing or new transient cluster. Run job ======= .. rest_method:: POST /v1.1/{project_id}/jobs/{job_id}/execute Runs a job. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - job_id: url_job_id Request Example --------------- .. literalinclude:: samples/jobs/job-execute-request.json :language: javascript List jobs ========= .. rest_method:: GET /v1.1/{project_id}/jobs Lists all jobs. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - limit: limit - marker: marker - sort_by: sort_by_jobs Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - jobs: jobs - description: job_description - tenant_id: tenant_id - created_at: created_at - mains: mains - updated_at: updated_at - libs: libs - is_protected: object_is_protected - interface: interface - is_public: object_is_public - type: type - id: job_id - name: job_name - markers: markers - prev: prev - next: next Response Example ---------------- .. rest_method:: GET /v1.1/{project_id}/jobs?limit=2 .. literalinclude:: samples/jobs/jobs-list-response.json :language: javascript Create job ========== .. rest_method:: POST /v1.1/{project_id}/jobs Creates a job object. Normal response codes: 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id Request Example --------------- .. literalinclude:: samples/jobs/job-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: job_description - tenant_id: tenant_id - created_at: created_at - mains: mains - updated_at: updated_at - libs: libs - is_protected: object_is_protected - interface: interface - is_public: object_is_public - type: type - id: job_id - name: job_name Show job details ================ .. rest_method:: GET /v1.1/{project_id}/jobs/{job_id} Shows details for a job. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - job_id: url_job_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: job_description - tenant_id: tenant_id - created_at: created_at - mains: mains - updated_at: updated_at - libs: libs - is_protected: object_is_protected - interface: interface - is_public: object_is_public - type: type - id: job_id - name: job_name Response Example ---------------- .. literalinclude:: samples/jobs/job-show-response.json :language: javascript Remove job ========== .. rest_method:: DELETE /v1.1/{project_id}/jobs/{job_id} Removes a job. Normal response codes: 204 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - job_id: url_job_id Update job object ================= .. rest_method:: PATCH /v1.1/{project_id}/jobs/{job_id} Updates a job object. Normal response codes: 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - job_id: url_job_id Request Example --------------- .. literalinclude:: samples/jobs/job-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: job_description - tenant_id: tenant_id - created_at: created_at - mains: mains - updated_at: updated_at - libs: libs - is_protected: object_is_protected - interface: interface - is_public: object_is_public - type: type - id: job_id - name: job_name ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/node-group-templates.inc0000664000175000017500000001410000000000000023132 0ustar00zuulzuul00000000000000.. -*- rst -*- ==================== Node group templates ==================== A cluster is a group of nodes with the same configuration. A node group template configures a node in the cluster. A template configures Hadoop processes and VM characteristics, such as the number of reduce slots for the task tracker, the number of CPUs, and the amount of RAM. The template specifies the VM characteristics through an OpenStack flavor. List node group templates ========================= ..
rest_method:: GET /v1.1/{project_id}/node-group-templates Lists available node group templates. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - limit: limit - marker: marker - sort_by: sort_by_node_group_templates Response Parameters ------------------- .. rest_parameters:: parameters.yaml - markers: markers - prev: prev - next: next - volume_local_to_instance: volume_local_to_instance - availability_zone: availability_zone - updated_at: updated_at - use_autoconfig: use_autoconfig - volumes_per_node: volumes_per_node - id: node_group_template_id - security_groups: security_groups - shares: object_shares - node_configs: node_configs - auto_security_group: auto_security_group - volumes_availability_zone: volumes_availability_zone - description: node_group_template_description - volume_mount_prefix: volume_mount_prefix - plugin_name: plugin_name - floating_ip_pool: floating_ip_pool - is_default: is_default - image_id: image_id - volumes_size: volumes_size - is_proxy_gateway: is_proxy_gateway - is_public: object_is_public - hadoop_version: hadoop_version - name: node_group_template_name - tenant_id: tenant_id - created_at: created_at - volume_type: volume_type - is_protected: object_is_protected - node_processes: node_processes - flavor_id: flavor_id Response Example ---------------- .. rest_method:: GET /v1.1/{project_id}/node-group-templates?limit=2&marker=38b4e146-1d39-4822-bad2-fef1bf304a52&sort_by=name .. literalinclude:: samples/node-group-templates/node-group-templates-list-response.json :language: javascript Create node group template ========================== .. rest_method:: POST /v1.1/{project_id}/node-group-templates Creates a node group template. Normal response codes: 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id Request Example --------------- .. literalinclude:: samples/node-group-templates/node-group-template-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - volume_local_to_instance: volume_local_to_instance - availability_zone: availability_zone - updated_at: updated_at - use_autoconfig: use_autoconfig - volumes_per_node: volumes_per_node - id: node_group_template_id - security_groups: security_groups - shares: object_shares - node_configs: node_configs - auto_security_group: auto_security_group - volumes_availability_zone: volumes_availability_zone - description: node_group_template_description - volume_mount_prefix: volume_mount_prefix - plugin_name: plugin_name - floating_ip_pool: floating_ip_pool - is_default: is_default - image_id: image_id - volumes_size: volumes_size - is_proxy_gateway: is_proxy_gateway - is_public: object_is_public - hadoop_version: hadoop_version - name: node_group_template_name - tenant_id: tenant_id - created_at: created_at - volume_type: volume_type - is_protected: object_is_protected - node_processes: node_processes - flavor_id: flavor_id Show node group template details ================================ .. rest_method:: GET /v1.1/{project_id}/node-group-templates/{node_group_template_id} Shows a node group template, by ID. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - node_group_template_id: url_node_group_template_id Response Parameters ------------------- .. 
Show node group template details ================================ .. rest_method:: GET /v1.1/{project_id}/node-group-templates/{node_group_template_id} Shows a node group template, by ID. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - node_group_template_id: url_node_group_template_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - volume_local_to_instance: volume_local_to_instance - availability_zone: availability_zone - updated_at: updated_at - use_autoconfig: use_autoconfig - volumes_per_node: volumes_per_node - id: node_group_template_id - security_groups: security_groups - shares: object_shares - node_configs: node_configs - auto_security_group: auto_security_group - volumes_availability_zone: volumes_availability_zone - description: node_group_template_description - volume_mount_prefix: volume_mount_prefix - plugin_name: plugin_name - floating_ip_pool: floating_ip_pool - is_default: is_default - image_id: image_id - volumes_size: volumes_size - is_proxy_gateway: is_proxy_gateway - is_public: object_is_public - hadoop_version: hadoop_version - name: node_group_template_name - tenant_id: tenant_id - created_at: created_at - volume_type: volume_type - is_protected: object_is_protected - node_processes: node_processes - flavor_id: flavor_id Response Example ---------------- .. literalinclude:: samples/node-group-templates/node-group-template-show-response.json :language: javascript Delete node group template ========================== .. rest_method:: DELETE /v1.1/{project_id}/node-group-templates/{node_group_template_id} Deletes a node group template. Normal response codes: 204 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - node_group_template_id: url_node_group_template_id Update node group template ========================== .. rest_method:: PUT /v1.1/{project_id}/node-group-templates/{node_group_template_id} Updates a node group template. Normal response codes: 202 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - node_group_template_id: url_node_group_template_id Request Example --------------- .. literalinclude:: samples/node-group-templates/node-group-template-update-request.json :language: javascript
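For completeness, a paging loop over the list call at the top of this file might look like the sketch below. The ``markers``/``next`` handling follows the parameter descriptions in this guide; the endpoint, token and the top-level ``node_group_templates`` response key are assumptions made for illustration.

.. code-block:: python

    # Hedged sketch: walk all node group templates, newest first, two per page.
    # SAHARA_URL, TOKEN, PROJECT_ID and the response key are assumed placeholders.
    import requests

    SAHARA_URL = "http://controller:8386/v1.1"   # assumed endpoint
    PROJECT_ID = "808d5032ea0446889097723bfc8e919d"
    HEADERS = {"X-Auth-Token": "TOKEN"}

    params = {"limit": 2, "sort_by": "-created_at"}
    while True:
        resp = requests.get(f"{SAHARA_URL}/{PROJECT_ID}/node-group-templates",
                            headers=HEADERS, params=params)
        body = resp.json()
        for tmpl in body["node_group_templates"]:
            print(tmpl["id"], tmpl["name"])
        # "markers" is only returned when "limit" was passed; "next" holds the
        # marker of the following page and is empty on the last page.
        next_marker = (body.get("markers") or {}).get("next")
        if not next_marker:
            break
        params["marker"] = next_marker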
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/parameters.yaml0000664000175000017500000005661400000000000021423 0ustar00zuulzuul00000000000000# variables in header Content-Length: description: | The length of the data, in bytes. in: header required: true type: string # variables in path hints: description: | Includes configuration hints in the response. in: path required: false type: boolean job_binary_id: description: | The UUID of the job binary. in: path required: true type: string limit: description: | The maximum number of objects in the response data. in: path required: false type: integer marker: description: | The ID of the last element on the list, which is not included in the response. in: path required: false type: string plugin: description: | Filters the response by a plugin name. in: path required: false type: string sort_by_cluster_templates: description: | The field for sorting cluster templates. This parameter accepts the following values: ``name``, ``plugin_name``, ``hadoop_version``, ``created_at``, ``updated_at``, ``id``. Any of these values can be prefixed with ``-`` for a descending sort. For example: ``-name``. in: path required: false type: string sort_by_clusters: description: | The field for sorting clusters. This parameter accepts the following values: ``name``, ``plugin_name``, ``hadoop_version``, ``status``, ``id``. Any of these values can be prefixed with ``-`` for a descending sort. For example: ``-name``. in: path required: false type: string sort_by_data_sources: description: | The field for sorting data sources. This parameter accepts the following values: ``id``, ``name``, ``type``, ``created_at``, ``updated_at``. Any of these values can be prefixed with ``-`` for a descending sort. For example: ``-name``. in: path required: false type: string sort_by_job_binary: description: | The field for sorting job binaries. This parameter accepts the following values: ``id``, ``name``, ``created_at``, ``updated_at``. Any of these values can be prefixed with ``-`` for a descending sort. For example: ``-name``. in: path required: false type: string sort_by_job_binary_internals: description: | The field for sorting job binary internals. This parameter accepts the following values: ``id``, ``name``, ``created_at``, ``updated_at``. Any of these values can be prefixed with ``-`` for a descending sort. For example: ``-name``. in: path required: false type: string sort_by_job_execution: description: | The field for sorting job executions. This parameter accepts the following values: ``id``, ``job_template``, ``cluster``, ``status``. Any of these values can be prefixed with ``-`` for a descending sort. For example: ``-cluster``. in: path required: false type: string sort_by_jobs: description: | The field for sorting jobs. This parameter accepts the following values: ``id``, ``name``, ``type``, ``created_at``, ``updated_at``. Any of these values can be prefixed with ``-`` for a descending sort. For example: ``-name``. in: path required: false type: string sort_by_node_group_templates: description: | The field for sorting node group templates. This parameter accepts the following values: ``name``, ``plugin_name``, ``hadoop_version``, ``created_at``, ``updated_at``, ``id``. Any of these values can be prefixed with ``-`` for a descending sort. For example: ``-name``. in: path required: false type: string type_2: description: | Filters the response by a job type. in: path required: false type: string url_cluster_id: description: | The ID of the cluster. in: path required: true type: string url_cluster_template_id: description: | The unique identifier of the cluster template. in: path required: true type: string url_data_source_id: description: | The UUID of the data source. in: path required: true type: string url_image_id: description: | The UUID of the image. in: path required: true type: string url_job_binary_id: description: | The UUID of the job binary. in: path required: true type: string url_job_binary_internals_id: description: | The UUID of the job binary internal. in: path required: true type: string url_job_binary_internals_name: description: | The name of the job binary internal. in: path required: true type: string url_job_execution_id: description: | The UUID of the job execution. in: path required: true type: string url_job_id: description: | The UUID of the job. in: path required: true type: string url_node_group_template_id: description: | The UUID of the node group template. in: path required: true type: string url_plugin_name: description: | Name of the plugin. in: path required: true type: string url_project_id: description: | UUID of the project. in: path required: true type: string version: description: | Filters the response by a plugin version. in: path required: true type: string version_1: description: | Version of the plugin. in: path required: false type: string # variables in body args: description: | The list of arguments.
in: body required: true type: array auto_security_group: description: | If set to ``True``, the cluster group is automatically secured. in: body required: true type: boolean availability_zone: description: | The availability of the node in the cluster. in: body required: true type: string binaries: description: | The list of job binary internal objects. in: body required: true type: array cluster_configs: description: | A set of key and value pairs that contain the cluster configuration. in: body required: true type: object cluster_id: description: | The UUID of the cluster. in: body required: true type: string cluster_template_description: description: | Description of the cluster template in: body required: false type: string cluster_template_id: description: | The UUID of the cluster template. in: body required: true type: string cluster_template_name: description: | The name of the cluster template. in: body required: true type: string clusters: description: | The list of clusters. in: body required: true type: array configs: description: | The mappings of the job tasks. in: body required: true type: object count: description: | The number of nodes in the cluster. in: body required: true type: integer created: description: | The date and time when the image was created. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. in: body required: true type: string created_at: description: | The date and time when the cluster was created. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string created_at_1: description: | The date and time when the object was created. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string created_at_2: description: | The date and time when the node was created in the cluster. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string created_at_3: description: | The date and time when the job execution object was created. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string data_source_description: description: | The description of the data source object. in: body required: true type: string data_source_id: description: | The UUID of the data source. in: body required: true type: string data_source_name: description: | The name of the data source. in: body required: true type: string data_source_urls: description: | The data source URLs. in: body required: true type: object datasize: description: | The size of the data stored in the internal database. in: body required: true type: integer default_image_id: description: | The default ID of the image. in: body required: true type: string description: description: | The description of the cluster. 
in: body required: true type: string description_3: description: | The description of the node in the cluster. in: body required: true type: string description_7: description: | Description of the image. in: body required: false type: string description_plugin: description: | The full description of the plugin. in: body required: true type: string domain_name: description: | Domain name for internal and external hostname resolution. Required if DNS service is enabled. in: body required: false type: string end_time: description: | The end date and time of the job execution. The date and time when the job completed execution. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string flavor_id: description: | The ID of the flavor. in: body required: true type: string floating_ip_pool: description: | The UUID of the pool in the template. in: body required: true type: string hadoop_version: description: | The version of the Hadoop used in the cluster. in: body required: true type: string hadoop_version_1: description: | The version of the Hadoop. in: body required: true type: string id: description: | The UUID of the cluster. in: body required: true type: string id_1: description: | The ID of the object. in: body required: true type: string image: description: | A set of key and value pairs that contain image properties. in: body required: true type: object image_description: description: | The description of the image. in: body required: true type: string image_id: description: | The UUID of the image. in: body required: true type: string image_name: description: | The name of the operating system image. in: body required: true type: string images: description: | The list of images and their properties. in: body required: true type: array info: description: | A set of key and value pairs that contain cluster information. in: body required: true type: object info_1: description: | The report of the executed job objects. in: body required: true type: object input_id: description: | The UUID of the input. in: body required: true type: string interface: description: | The interfaces of the job object. in: body required: true type: array is_default: description: | If set to ``true``, the cluster is the default cluster. in: body required: true type: boolean is_protected: description: | If set to ``true``, the cluster is protected. in: body required: true type: boolean is_protected_2: description: | If set to ``true``, the node is protected. in: body required: true type: boolean is_protected_3: description: | If set to ``true``, the job execution object is protected. in: body required: true type: boolean is_proxy_gateway: description: | If set to ``true``, the node is the proxy gateway. in: body required: true type: boolean is_public: description: | If set to ``true``, the cluster is public. in: body required: true type: boolean is_transient: description: | If set to ``true``, the cluster is transient. in: body required: true type: boolean job_binary_description: description: | The description of the job binary object. in: body required: true type: string job_binary_internals_id: description: | The UUID of the job binary internal. in: body required: true type: string job_binary_internals_name: description: | The name of the job binary internal. 
in: body required: true type: string job_binary_name: description: | The name of the object. in: body required: true type: string job_description: description: | The description of the job object. in: body required: true type: string job_execution: description: | A set of key and value pairs that contain the job object. in: body required: true type: object job_execution_id: description: | The UUID of the job execution object. in: body required: true type: string job_execution_is_public: description: | If set to ``true``, the job execution object is public. in: body required: true type: boolean job_executions: description: | The list of job execution objects. in: body required: true type: array job_id: description: | The UUID of the job object. in: body required: true type: string job_name: description: | The name of the job object. in: body required: true type: string job_types: description: | The list of plugins and their job types. in: body required: true type: array jobs: description: | The list of the jobs. in: body required: true type: array libs: description: | The list of the job object properties. in: body required: true type: array mains: description: | The list of the job object and their properties. in: body required: true type: array management_public_key: description: | The SSH key for the management network. in: body required: true type: string markers: description: | The markers of previous and following pages of data. This field exists only if ``limit`` is passed to request. in: body required: false type: object metadata: description: | A set of key and value pairs that contain image metadata. in: body required: true type: object minDisk: description: | The minimum disk space, in GB. in: body required: true type: integer minRam: description: | The minimum amount of random access memory (RAM) for the image, in GB. in: body required: true type: integer name: description: | The name of the cluster. in: body required: true type: string name_1: description: | The name of the object. in: body required: true type: string neutron_management_network: description: | The UUID of the neutron management network. in: body required: true type: string next: description: | The marker of next page of list data. in: body required: false type: string node_configs: description: | A set of key and value pairs that contain the node configuration in the cluster. in: body required: true type: object node_group_template_description: description: | Description of the node group template in: body required: false type: string node_group_template_id: description: | The UUID of the node group template. in: body required: true type: string node_group_template_name: description: | The name of the node group template. in: body required: true type: string node_groups: description: | The detail properties of the node in key-value pairs. in: body required: true type: object node_processes: description: | The list of the processes performed by the node. in: body required: true type: array object_is_protected: description: | If set to ``true``, the object is protected. in: body required: true type: boolean object_is_public: description: | If set to ``true``, the object is public. in: body required: true type: boolean object_shares: description: | The sharing of resources in the cluster. in: body required: true type: string oozie_job_id: description: | The UUID of the ``oozie_job``. in: body required: true type: string output_id: description: | The UUID of the output of job execution object. 
in: body required: true type: string params: description: | The mappings of values to the parameters. in: body required: true type: object plugin_name: description: | The name of the plugin. in: body required: true type: string plugins: description: | The list of plugins. in: body required: true type: array prev: description: | The marker of previous page. May be ``null`` if previous page is first or if current page is first. in: body required: false type: string progress: description: | A progress indicator, as a percentage value, for the amount of image content that has been processed. in: body required: true type: integer project_id: description: | The UUID of the project. in: body required: true type: string provision_progress: description: | A list of the cluster progresses. in: body required: true type: array return_code: description: | The code returned after job has executed. in: body required: true type: string security_groups: description: | The security groups of the node. in: body required: true type: string shares: description: | The shares of the cluster. in: body required: true type: string start_time: description: | The date and time when the job started. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string status: description: | The status of the cluster. in: body required: true type: string status_1: description: | The current status of the image. in: body required: true type: string status_description: description: | The description of the cluster status. in: body required: true type: string tags: description: | List of tags to add. in: body required: true type: array tags_1: description: | Lists images only with specific tag. Can be used multiple times. in: body required: false type: string tags_2: description: | One or more image tags. in: body required: true type: array tags_3: description: | List of tags to remove. in: body required: true type: array tenant_id: description: | The UUID of the tenant. in: body required: true type: string title: description: | The title of the plugin. in: body required: true type: string trust_id: description: | The id of the trust. in: body required: true type: integer type: description: | The type of the data source object. in: body required: true type: string type_1: description: | The type of the job object. in: body required: true type: string updated: description: | The date and time when the image was updated. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. in: body required: true type: string updated_at: description: | The date and time when the cluster was updated. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string updated_at_1: description: | The date and time when the object was updated. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string updated_at_2: description: | The date and time when the node was updated. 
The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string updated_at_3: description: | The date and time when the job execution object was updated. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string url: description: | The url of the data source object. in: body required: true type: string url_1: description: | The url of the job binary object. in: body required: true type: string use_autoconfig: description: | If set to ``true``, the cluster is auto configured. in: body required: true type: boolean use_autoconfig_1: description: | If set to ``true``, the node is auto configured. in: body required: true type: boolean username: description: | The name of the user for the image. in: body required: true type: string username_1: description: | The user name to log in to an instance operating system for remote operations execution. in: body required: true type: string versions: description: | The list of plugin versions. in: body required: true type: array volume_local_to_instance: description: | If set to ``true``, the volume is local to the instance. in: body required: true type: boolean volume_mount_prefix: description: | The mount point of the node. in: body required: true type: string volume_type: description: | The type of volume in a node. in: body required: true type: string volumes_availability_zone: description: | The availability zone of the volumes. in: body required: true type: string volumes_per_node: description: | The number of volumes for the node. in: body required: true type: integer volumes_size: description: | The size of the volumes in a node. in: body required: true type: integer ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/plugins.inc0000664000175000017500000000543100000000000020547 0ustar00zuulzuul00000000000000.. -*- rst -*- ======= Plugins ======= A plugin object defines the Hadoop or Spark version that it can install and which configurations can be set for the cluster. Show plugin details =================== .. rest_method:: GET /v1.1/{project_id}/plugins/{plugin_name} Shows details for a plugin. Normal response codes: 200 Error response codes: 400, 500 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - plugin_name: url_plugin_name Response Parameters ------------------- .. rest_parameters:: parameters.yaml - versions: versions - title: title - description: description_plugin - name: plugin_name Response Example ---------------- .. literalinclude:: samples/plugins/plugin-show-response.json :language: javascript List plugins ============ .. rest_method:: GET /v1.1/{project_id}/plugins Lists all registered plugins. Normal response codes: 200 Error response codes: 400, 500 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - title: title - versions: versions - plugins: plugins - description: description_plugin - name: plugin_name Response Example ---------------- .. 
literalinclude:: samples/plugins/plugins-list-response.json :language: javascript Show plugin version details =========================== .. rest_method:: GET /v1.1/{project_id}/plugins/{plugin_name}/{version} Shows details for a plugin version. Normal response codes: 200 Error response codes: 400, 500 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - plugin_name: url_plugin_name - version: version Response Parameters ------------------- .. rest_parameters:: parameters.yaml - versions: versions - title: title - description: description_plugin - name: plugin_name Response Example ---------------- .. literalinclude:: samples/plugins/plugin-version-show-response.json :language: javascript Update plugin details ===================== .. rest_method:: PATCH /v1.1/{project_id}/plugins/{plugin_name} Updates details for a plugin. Normal response codes: 202 Error response codes: 400, 500 Request ------- .. rest_parameters:: parameters.yaml - project_id: url_project_id - plugin_name: url_plugin_name Request Example --------------- .. literalinclude:: samples/plugins/plugin-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - title: title - versions: versions - description: description_plugin - name: plugin_name Response Example ---------------- .. literalinclude:: samples/plugins/plugin-update-response.json :language: javascript ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.589891 sahara-16.0.0/api-ref/source/v1.1/samples/0000775000175000017500000000000000000000000020034 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641486.6138911 sahara-16.0.0/api-ref/source/v1.1/samples/cluster-templates/0000775000175000017500000000000000000000000023511 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/cluster-templates/cluster-template-create-request.json0000664000175000017500000000065300000000000032631 0ustar00zuulzuul00000000000000{ "plugin_name": "vanilla", "hadoop_version": "2.7.1", "node_groups": [ { "name": "worker", "count": 3, "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251" }, { "name": "master", "count": 1, "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae" } ], "name": "cluster-template" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/cluster-templates/cluster-template-create-response.json0000664000175000017500000000574500000000000033006 0ustar00zuulzuul00000000000000{ "cluster_template": { "is_public": false, "anti_affinity": [], "name": "cluster-template", "created_at": "2015-09-14T10:38:44", "tenant_id": "808d5032ea0446889097723bfc8e919d", "cluster_configs": {}, "shares": null, "id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", "default_image_id": null, "is_default": false, "updated_at": null, "plugin_name": "vanilla", "node_groups": [ { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "availability_zone": null, "count": 1, "flavor_id": "2", "id": "1751c04e-8f39-467e-a421-480961172d4b", "security_groups": null, "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:38:44", 
"node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "updated_at": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "master", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null }, { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "availability_zone": null, "count": 3, "flavor_id": "2", "id": "3ee85068-c455-4391-9db2-b54a20b99df3", "security_groups": null, "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:38:44", "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", "updated_at": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "worker", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "datanode", "nodemanager" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null } ], "neutron_management_network": null, "domain_name": null, "hadoop_version": "2.7.1", "use_autoconfig": true, "description": null, "is_protected": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/cluster-templates/cluster-template-show-response.json0000664000175000017500000000600700000000000032513 0ustar00zuulzuul00000000000000{ "cluster_template": { "is_public": false, "anti_affinity": [], "name": "cluster-template", "created_at": "2015-09-14T10:38:44", "tenant_id": "808d5032ea0446889097723bfc8e919d", "cluster_configs": {}, "shares": null, "id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", "default_image_id": null, "is_default": false, "updated_at": null, "plugin_name": "vanilla", "node_groups": [ { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "availability_zone": null, "count": 1, "flavor_id": "2", "id": "1751c04e-8f39-467e-a421-480961172d4b", "security_groups": null, "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:38:44", "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "updated_at": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "master", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null }, { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "availability_zone": null, "count": 3, "flavor_id": "2", "id": "3ee85068-c455-4391-9db2-b54a20b99df3", "security_groups": null, "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:38:44", "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", "updated_at": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "worker", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "datanode", "nodemanager" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null } ], "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", "domain_name": null, "hadoop_version": "2.7.1", "use_autoconfig": true, "description": null, "is_protected": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/cluster-templates/cluster-template-update-request.json0000664000175000017500000000034300000000000032644 0ustar00zuulzuul00000000000000{ "description": "Updated template", "plugin_name": "vanilla", "hadoop_version": "2.7.1", "name": "vanilla-updated", "cluster_configs": { "HDFS": { "dfs.replication": 2 } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/cluster-templates/cluster-template-update-response.json0000664000175000017500000000437000000000000033016 0ustar00zuulzuul00000000000000{ "cluster_template": { "is_public": false, "anti_affinity": [], "name": "vanilla-updated", "created_at": "2015-08-21T08:41:24", "tenant_id": "808d5032ea0446889097723bfc8e919d", "cluster_configs": { "HDFS": { "dfs.replication": 2 } }, "shares": null, "id": "84d47e85-6094-473f-bf6d-5a7e6e86564e", "default_image_id": null, "is_default": false, "updated_at": "2015-09-14T10:45:57", "plugin_name": "vanilla", "node_groups": [ { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": { "YARN": {}, "JobFlow": {}, "MapReduce": {}, "Hive": {}, "Hadoop": {}, "HDFS": {} }, "auto_security_group": true, "availability_zone": "", "count": 1, "flavor_id": "3", "id": "57b966ab-617e-4735-bf60-0cb991208a52", "security_groups": [], "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-08-21T08:41:24", "node_group_template_id": "a5533187-3f14-42c3-ba3a-196c13fe0fb5", "updated_at": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "all", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "datanode", "historyserver", "resourcemanager", "nodemanager", "oozie" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null } ], "neutron_management_network": null, "domain_name": null, "hadoop_version": "2.7.1", "use_autoconfig": true, "description": "Updated template", "is_protected": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/cluster-templates/cluster-templates-list-response.json0000664000175000017500000001267300000000000032677 0ustar00zuulzuul00000000000000{ "cluster_templates": [ { "is_public": false, "anti_affinity": [], "name": "cluster-template", "created_at": "2015-09-14T10:38:44", "tenant_id": "808d5032ea0446889097723bfc8e919d", "cluster_configs": {}, "shares": null, "id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", "default_image_id": null, "is_default": false, "updated_at": null, "plugin_name": "vanilla", "node_groups": [ { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "availability_zone": null, "count": 1, "flavor_id": "2", "id": "1751c04e-8f39-467e-a421-480961172d4b", "security_groups": null, "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:38:44", "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "updated_at": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "master", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null }, { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", 
"node_configs": {}, "auto_security_group": false, "availability_zone": null, "count": 3, "flavor_id": "2", "id": "3ee85068-c455-4391-9db2-b54a20b99df3", "security_groups": null, "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:38:44", "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", "updated_at": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "worker", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "datanode", "nodemanager" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null } ], "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", "domain_name": null, "hadoop_version": "2.7.1", "use_autoconfig": true, "description": null, "is_protected": false }, { "is_public": true, "anti_affinity": [], "name": "asd", "created_at": "2015-08-18T08:39:39", "tenant_id": "808d5032ea0446889097723bfc8e919d", "cluster_configs": { "general": {} }, "shares": null, "id": "5a9c787c-2078-4f7d-9a66-27759be9051b", "default_image_id": null, "is_default": false, "updated_at": "2015-09-14T08:41:15", "plugin_name": "vanilla", "node_groups": [ { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": true, "availability_zone": "", "count": 1, "flavor_id": "2", "id": "a65864dd-3f99-4d29-a011-f7711cc23fa0", "security_groups": [], "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-08-18T08:39:39", "node_group_template_id": "42ce49de-1b8f-41d5-8f4a-244ec0826d92", "updated_at": null, "volumes_per_node": 1, "is_proxy_gateway": false, "name": "asd", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "jobtracker" ], "volumes_size": 10, "volume_local_to_instance": false, "volume_type": null } ], "neutron_management_network": null, "domain_name": null, "hadoop_version": "2.7.1", "use_autoconfig": true, "description": "", "is_protected": false } ], "markers": { "prev": null, "next": "2c76e0d3-56cd-4d28-bb4f-4808e538c7b9" } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.617891 sahara-16.0.0/api-ref/source/v1.1/samples/clusters/0000775000175000017500000000000000000000000021700 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/clusters/cluster-create-request.json0000664000175000017500000000051300000000000027202 0ustar00zuulzuul00000000000000{ "plugin_name": "vanilla", "hadoop_version": "2.7.1", "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", "user_keypair_id": "test", "name": "vanilla-cluster", "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/clusters/cluster-create-response.json0000664000175000017500000001307100000000000027353 0ustar00zuulzuul00000000000000{ "cluster": { "is_public": false, "tenant_id": "808d5032ea0446889097723bfc8e919d", "shares": null, "domain_name": null, "status_description": "", "plugin_name": "vanilla", "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", "info": {}, "user_keypair_id": "test", "management_public_key": "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCfe9ARO+t9CybtuC1+cusDTeQL7wos1+U2dKPlCUJvNUn0PcunGefqWI4MUZPY9yGmvRqfINy7/xRQCzL0AwgqzwcCXamcK8JCC80uH7j8Vxa4kJheG1jxMoz/FpDSdRnzNZ+m7H5rjOwAQANhL7KatGLyCPQg9fqOoaIyCZE/A3fztm/XjJMpWnuANpUZubZtISEfu4UZKVk/DPSlBrbTZkTOvEog1LwZCZoTt0rq6a7PJFzJJkq0YecRudu/f3tpXbNe/F84sd9PhOSqcrRbm72WzglyEE8PuS1kuWpEz8G+Y5/0tQxnoh6khj9mgflrdCFuvpdutFLH4eN5MFDh Generated-by-Sahara\n", "id": "e172d86c-906d-418e-a29c-6189f53bfa42", "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", "node_groups": [ { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.maximum-allocation-mb": 2048, "yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048 }, "MapReduce": { "yarn.app.mapreduce.am.resource.mb": 256, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "mapreduce.reduce.memory.mb": 512, "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m" } }, "auto_security_group": false, "availability_zone": null, "count": 1, "flavor_id": "2", "id": "0fe07f2a-0275-4bc0-93b2-c3c1e48e2815", "security_groups": null, "use_autoconfig": true, "instances": [], "volumes_availability_zone": null, "created_at": "2015-09-14T10:57:11", "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "updated_at": "2015-09-14T10:57:12", "volumes_per_node": 0, "is_proxy_gateway": false, "name": "master", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null }, { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.maximum-allocation-mb": 2048, "yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048 }, "MapReduce": { "yarn.app.mapreduce.am.resource.mb": 256, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "mapreduce.reduce.memory.mb": 512, "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m" } }, "auto_security_group": false, "availability_zone": null, "count": 3, "flavor_id": "2", "id": "c7a3bea4-c898-446b-8c67-6d378d4c06c4", "security_groups": null, "use_autoconfig": true, "instances": [], "volumes_availability_zone": null, "created_at": "2015-09-14T10:57:11", "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", "updated_at": "2015-09-14T10:57:12", "volumes_per_node": 0, "is_proxy_gateway": false, "name": "worker", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "datanode", "nodemanager" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null } ], "provision_progress": [], "hadoop_version": "2.7.1", "use_autoconfig": true, "trust_id": null, "description": null, "created_at": "2015-09-14T10:57:11", "is_protected": false, "updated_at": "2015-09-14T10:57:12", "is_transient": false, "cluster_configs": { "HDFS": { "dfs.replication": 3 } }, "anti_affinity": [], "name": "vanilla-cluster", "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", "status": "Validating" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 
sahara-16.0.0/api-ref/source/v1.1/samples/clusters/cluster-scale-request.json0000664000175000017500000000045000000000000027026 0ustar00zuulzuul00000000000000{ "add_node_groups": [ { "count": 1, "name": "b-worker", "node_group_template_id": "bc270ffe-a086-4eeb-9baa-2f5a73504622" } ], "resize_node_groups": [ { "count": 4, "name": "worker" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/clusters/cluster-scale-response.json0000664000175000017500000004110300000000000027174 0ustar00zuulzuul00000000000000{ "cluster": { "info": { "YARN": { "Web UI": "http://172.18.168.115:8088", "ResourceManager": "http://172.18.168.115:8032" }, "HDFS": { "Web UI": "http://172.18.168.115:50070", "NameNode": "hdfs://vanilla-cluster-master-0:9000" }, "MapReduce JobHistory Server": { "Web UI": "http://172.18.168.115:19888" }, "JobFlow": { "Oozie": "http://172.18.168.115:11000" } }, "plugin_name": "vanilla", "hadoop_version": "2.7.1", "updated_at": "2015-09-14T11:01:15", "name": "vanilla-cluster", "id": "e172d86c-906d-418e-a29c-6189f53bfa42", "management_public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfe9ARO+t9CybtuC1+cusDTeQL7wos1+U2dKPlCUJvNUn0PcunGefqWI4MUZPY9yGmvRqfINy7/xRQCzL0AwgqzwcCXamcK8JCC80uH7j8Vxa4kJheG1jxMoz/FpDSdRnzNZ+m7H5rjOwAQANhL7KatGLyCPQg9fqOoaIyCZE/A3fztm/XjJMpWnuANpUZubZtISEfu4UZKVk/DPSlBrbTZkTOvEog1LwZCZoTt0rq6a7PJFzJJkq0YecRudu/f3tpXbNe/F84sd9PhOSqcrRbm72WzglyEE8PuS1kuWpEz8G+Y5/0tQxnoh6khj9mgflrdCFuvpdutFLH4eN5MFDh Generated-by-Sahara\n", "trust_id": null, "status_description": "", "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", "is_protected": false, "is_transient": false, "provision_progress": [ { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 1, "successful": true, "step_name": "Create Heat stack", "step_type": "Engine: create cluster", "updated_at": "2015-09-14T10:57:38", "tenant_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T10:57:18", "id": "0a6d95f9-30f4-4434-823a-a38a7999a5af" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 4, "successful": true, "step_name": "Configure instances", "step_type": "Engine: create cluster", "updated_at": "2015-09-14T10:58:22", "tenant_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T10:58:16", "id": "29f2b587-c34c-4871-9ed9-9235b411cd9a" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 1, "successful": true, "step_name": "Start the following process(es): Oozie", "step_type": "Plugin: start cluster", "updated_at": "2015-09-14T11:01:15", "tenant_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T11:00:27", "id": "36f1efde-90f9-41c1-b409-aa1cf9623e3e" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 4, "successful": true, "step_name": "Configure instances", "step_type": "Plugin: configure cluster", "updated_at": "2015-09-14T10:59:21", "tenant_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T10:58:22", "id": "602bcc27-3a2d-42c8-8aca-ebc475319c72" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 1, "successful": true, "step_name": "Configure topology data", "step_type": "Plugin: configure cluster", "updated_at": "2015-09-14T10:59:37", "tenant_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T10:59:21", "id": "7e291df1-2d32-410d-ae89-33ab6f83cf17" }, { "cluster_id": 
"e172d86c-906d-418e-a29c-6189f53bfa42", "total": 3, "successful": true, "step_name": "Start the following process(es): DataNodes, NodeManagers", "step_type": "Plugin: start cluster", "updated_at": "2015-09-14T11:00:11", "tenant_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T11:00:01", "id": "8ab7933c-ad61-4a4f-88db-23ce78ee10f6" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 1, "successful": true, "step_name": "Await DataNodes start up", "step_type": "Plugin: start cluster", "updated_at": "2015-09-14T11:00:21", "tenant_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T11:00:11", "id": "9c8dc016-8c5b-4e80-9857-80c41f6bd971" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 1, "successful": true, "step_name": "Start the following process(es): HistoryServer", "step_type": "Plugin: start cluster", "updated_at": "2015-09-14T11:00:27", "tenant_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T11:00:21", "id": "c6327532-222b-416c-858f-73dbb32b8e97" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 4, "successful": true, "step_name": "Wait for instance accessibility", "step_type": "Engine: create cluster", "updated_at": "2015-09-14T10:58:14", "tenant_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T10:57:41", "id": "d3eca726-8b44-473a-ac29-fba45a893725" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 0, "successful": true, "step_name": "Mount volumes to instances", "step_type": "Engine: create cluster", "updated_at": "2015-09-14T10:58:15", "tenant_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T10:58:14", "id": "d7a875ff-64bf-41aa-882d-b5061c8ee152" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 1, "successful": true, "step_name": "Start the following process(es): ResourceManager", "step_type": "Plugin: start cluster", "updated_at": "2015-09-14T11:00:00", "tenant_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T10:59:55", "id": "ded7d227-10b8-4cb0-ab6c-25da1462bb7a" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 1, "successful": true, "step_name": "Start the following process(es): NameNode", "step_type": "Plugin: start cluster", "updated_at": "2015-09-14T10:59:54", "tenant_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T10:59:38", "id": "e1701ff5-930a-4212-945a-43515dfe24d1" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 4, "successful": true, "step_name": "Assign IPs", "step_type": "Engine: create cluster", "updated_at": "2015-09-14T10:57:41", "tenant_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T10:57:38", "id": "eaf0ab1b-bf8f-48f0-8f2c-fa4f82f539b9" } ], "status": "Active", "description": null, "use_autoconfig": true, "shares": null, "domain_name": null, "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", "is_public": false, "tenant_id": "808d5032ea0446889097723bfc8e919d", "node_groups": [ { "volumes_per_node": 0, "volume_type": null, "updated_at": "2015-09-14T10:57:37", "name": "b-worker", "id": "b7a6dea4-c898-446b-8c67-4f378d4c06c4", "node_group_template_id": "bc270ffe-a086-4eeb-9baa-2f5a73504622", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048, "yarn.scheduler.maximum-allocation-mb": 2048 }, "MapReduce": { "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": 
"-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m", "mapreduce.reduce.memory.mb": 512, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "yarn.app.mapreduce.am.resource.mb": 256 } }, "auto_security_group": false, "volumes_availability_zone": null, "use_autoconfig": true, "security_groups": null, "shares": null, "node_processes": [ "datanode", "nodemanager" ], "availability_zone": null, "flavor_id": "2", "image_id": null, "volume_local_to_instance": false, "count": 1, "volumes_size": 0, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "volume_mount_prefix": "/volumes/disk", "instances": [], "is_proxy_gateway": false, "created_at": "2015-09-14T10:57:11" }, { "volumes_per_node": 0, "volume_type": null, "updated_at": "2015-09-14T10:57:36", "name": "master", "id": "0fe07f2a-0275-4bc0-93b2-c3c1e48e2815", "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048, "yarn.scheduler.maximum-allocation-mb": 2048 }, "MapReduce": { "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m", "mapreduce.reduce.memory.mb": 512, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "yarn.app.mapreduce.am.resource.mb": 256 } }, "auto_security_group": false, "volumes_availability_zone": null, "use_autoconfig": true, "security_groups": null, "shares": null, "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "availability_zone": null, "flavor_id": "2", "image_id": null, "volume_local_to_instance": false, "count": 1, "volumes_size": 0, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "volume_mount_prefix": "/volumes/disk", "instances": [ { "instance_id": "b9f16a07-88fc-423e-83a3-489598fe6737", "internal_ip": "10.50.0.60", "instance_name": "vanilla-cluster-master-0", "updated_at": "2015-09-14T10:57:39", "management_ip": "172.18.168.115", "created_at": "2015-09-14T10:57:36", "id": "4867d92e-cc7b-4cde-9a1a-149e91caa491" } ], "is_proxy_gateway": false, "created_at": "2015-09-14T10:57:11" }, { "volumes_per_node": 0, "volume_type": null, "updated_at": "2015-09-14T10:57:37", "name": "worker", "id": "c7a3bea4-c898-446b-8c67-6d378d4c06c4", "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048, "yarn.scheduler.maximum-allocation-mb": 2048 }, "MapReduce": { "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m", "mapreduce.reduce.memory.mb": 512, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "yarn.app.mapreduce.am.resource.mb": 256 } }, "auto_security_group": false, "volumes_availability_zone": null, "use_autoconfig": true, "security_groups": null, "shares": null, "node_processes": [ "datanode", "nodemanager" ], "availability_zone": null, "flavor_id": "2", "image_id": null, "volume_local_to_instance": false, "count": 4, "volumes_size": 0, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "volume_mount_prefix": "/volumes/disk", "instances": [ { "instance_id": "0cf1ee81-aa72-48da-be2c-65bc2fa51f8f", "internal_ip": "10.50.0.63", "instance_name": "vanilla-cluster-worker-0", "updated_at": "2015-09-14T10:57:39", 
"management_ip": "172.18.168.118", "created_at": "2015-09-14T10:57:37", "id": "f3633b30-c1e4-4144-930b-ab5b780b87be" }, { "instance_id": "4a937391-b594-4ad0-9a53-00a99a691383", "internal_ip": "10.50.0.62", "instance_name": "vanilla-cluster-worker-1", "updated_at": "2015-09-14T10:57:40", "management_ip": "172.18.168.117", "created_at": "2015-09-14T10:57:37", "id": "0d66fd93-f277-4a94-b46a-f5866aa0c38f" }, { "instance_id": "839b1d56-6d0d-4aa4-9d05-30e029c276f8", "internal_ip": "10.50.0.61", "instance_name": "vanilla-cluster-worker-2", "updated_at": "2015-09-14T10:57:40", "management_ip": "172.18.168.116", "created_at": "2015-09-14T10:57:37", "id": "0982cefd-5c58-436e-8f1e-c1d0830f18a7" } ], "is_proxy_gateway": false, "created_at": "2015-09-14T10:57:11" } ], "cluster_configs": { "HDFS": { "dfs.replication": 3 } }, "user_keypair_id": "apavlov", "anti_affinity": [], "created_at": "2015-09-14T10:57:11" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/clusters/cluster-show-response.json0000664000175000017500000001307100000000000027070 0ustar00zuulzuul00000000000000{ "cluster": { "is_public": false, "tenant_id": "808d5032ea0446889097723bfc8e919d", "shares": null, "domain_name": null, "status_description": "", "plugin_name": "vanilla", "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", "info": {}, "user_keypair_id": "test", "management_public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfe9ARO+t9CybtuC1+cusDTeQL7wos1+U2dKPlCUJvNUn0PcunGefqWI4MUZPY9yGmvRqfINy7/xRQCzL0AwgqzwcCXamcK8JCC80uH7j8Vxa4kJheG1jxMoz/FpDSdRnzNZ+m7H5rjOwAQANhL7KatGLyCPQg9fqOoaIyCZE/A3fztm/XjJMpWnuANpUZubZtISEfu4UZKVk/DPSlBrbTZkTOvEog1LwZCZoTt0rq6a7PJFzJJkq0YecRudu/f3tpXbNe/F84sd9PhOSqcrRbm72WzglyEE8PuS1kuWpEz8G+Y5/0tQxnoh6khj9mgflrdCFuvpdutFLH4eN5MFDh Generated-by-Sahara\n", "id": "e172d86c-906d-418e-a29c-6189f53bfa42", "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", "node_groups": [ { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.maximum-allocation-mb": 2048, "yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048 }, "MapReduce": { "yarn.app.mapreduce.am.resource.mb": 256, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "mapreduce.reduce.memory.mb": 512, "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m" } }, "auto_security_group": false, "availability_zone": null, "count": 1, "flavor_id": "2", "id": "0fe07f2a-0275-4bc0-93b2-c3c1e48e2815", "security_groups": null, "use_autoconfig": true, "instances": [], "volumes_availability_zone": null, "created_at": "2015-09-14T10:57:11", "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "updated_at": "2015-09-14T10:57:12", "volumes_per_node": 0, "is_proxy_gateway": false, "name": "master", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null }, { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.maximum-allocation-mb": 2048, "yarn.scheduler.minimum-allocation-mb": 256, 
"yarn.nodemanager.resource.memory-mb": 2048 }, "MapReduce": { "yarn.app.mapreduce.am.resource.mb": 256, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "mapreduce.reduce.memory.mb": 512, "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m" } }, "auto_security_group": false, "availability_zone": null, "count": 3, "flavor_id": "2", "id": "c7a3bea4-c898-446b-8c67-6d378d4c06c4", "security_groups": null, "use_autoconfig": true, "instances": [], "volumes_availability_zone": null, "created_at": "2015-09-14T10:57:11", "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", "updated_at": "2015-09-14T10:57:12", "volumes_per_node": 0, "is_proxy_gateway": false, "name": "worker", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "datanode", "nodemanager" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null } ], "provision_progress": [], "hadoop_version": "2.7.1", "use_autoconfig": true, "trust_id": null, "description": null, "created_at": "2015-09-14T10:57:11", "is_protected": false, "updated_at": "2015-09-14T10:57:12", "is_transient": false, "cluster_configs": { "HDFS": { "dfs.replication": 3 } }, "anti_affinity": [], "name": "vanilla-cluster", "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", "status": "Validating" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/clusters/cluster-update-request.json0000664000175000017500000000010000000000000027211 0ustar00zuulzuul00000000000000{ "name": "public-vanilla-cluster", "is_public": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/clusters/cluster-update-response.json0000664000175000017500000001307700000000000027400 0ustar00zuulzuul00000000000000{ "cluster": { "is_public": true, "tenant_id": "808d5032ea0446889097723bfc8e919d", "shares": null, "domain_name": null, "status_description": "", "plugin_name": "vanilla", "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", "info": {}, "user_keypair_id": "test", "management_public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfe9ARO+t9CybtuC1+cusDTeQL7wos1+U2dKPlCUJvNUn0PcunGefqWI4MUZPY9yGmvRqfINy7/xRQCzL0AwgqzwcCXamcK8JCC80uH7j8Vxa4kJheG1jxMoz/FpDSdRnzNZ+m7H5rjOwAQANhL7KatGLyCPQg9fqOoaIyCZE/A3fztm/XjJMpWnuANpUZubZtISEfu4UZKVk/DPSlBrbTZkTOvEog1LwZCZoTt0rq6a7PJFzJJkq0YecRudu/f3tpXbNe/F84sd9PhOSqcrRbm72WzglyEE8PuS1kuWpEz8G+Y5/0tQxnoh6khj9mgflrdCFuvpdutFLH4eN5MFDh Generated-by-Sahara\n", "id": "e172d86c-906d-418e-a29c-6189f53bfa42", "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", "node_groups": [ { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.maximum-allocation-mb": 2048, "yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048 }, "MapReduce": { "yarn.app.mapreduce.am.resource.mb": 256, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "mapreduce.reduce.memory.mb": 512, "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m" } }, "auto_security_group": false, "availability_zone": null, "count": 1, "flavor_id": "2", "id": "0fe07f2a-0275-4bc0-93b2-c3c1e48e2815", 
"security_groups": null, "use_autoconfig": true, "instances": [], "volumes_availability_zone": null, "created_at": "2015-09-14T10:57:11", "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "updated_at": "2015-09-14T10:57:12", "volumes_per_node": 0, "is_proxy_gateway": false, "name": "master", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null }, { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.maximum-allocation-mb": 2048, "yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048 }, "MapReduce": { "yarn.app.mapreduce.am.resource.mb": 256, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "mapreduce.reduce.memory.mb": 512, "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m" } }, "auto_security_group": false, "availability_zone": null, "count": 3, "flavor_id": "2", "id": "c7a3bea4-c898-446b-8c67-6d378d4c06c4", "security_groups": null, "use_autoconfig": true, "instances": [], "volumes_availability_zone": null, "created_at": "2015-09-14T10:57:11", "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", "updated_at": "2015-09-14T10:57:12", "volumes_per_node": 0, "is_proxy_gateway": false, "name": "worker", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "datanode", "nodemanager" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null } ], "provision_progress": [], "hadoop_version": "2.7.1", "use_autoconfig": true, "trust_id": null, "description": null, "created_at": "2015-09-14T10:57:11", "is_protected": false, "updated_at": "2015-09-14T10:57:12", "is_transient": false, "cluster_configs": { "HDFS": { "dfs.replication": 3 } }, "anti_affinity": [], "name": "public-vanilla-cluster", "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", "status": "Validating" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/clusters/clusters-list-response.json0000664000175000017500000003754700000000000027264 0ustar00zuulzuul00000000000000{ "clusters": [ { "is_public": false, "tenant_id": "808d5032ea0446889097723bfc8e919d", "shares": null, "domain_name": null, "status_description": "", "plugin_name": "vanilla", "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", "info": { "YARN": { "Web UI": "http://172.18.168.115:8088", "ResourceManager": "http://172.18.168.115:8032" }, "HDFS": { "Web UI": "http://172.18.168.115:50070", "NameNode": "hdfs://vanilla-cluster-master-0:9000" }, "JobFlow": { "Oozie": "http://172.18.168.115:11000" }, "MapReduce JobHistory Server": { "Web UI": "http://172.18.168.115:19888" } }, "user_keypair_id": "apavlov", "management_public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfe9ARO+t9CybtuC1+cusDTeQL7wos1+U2dKPlCUJvNUn0PcunGefqWI4MUZPY9yGmvRqfINy7/xRQCzL0AwgqzwcCXamcK8JCC80uH7j8Vxa4kJheG1jxMoz/FpDSdRnzNZ+m7H5rjOwAQANhL7KatGLyCPQg9fqOoaIyCZE/A3fztm/XjJMpWnuANpUZubZtISEfu4UZKVk/DPSlBrbTZkTOvEog1LwZCZoTt0rq6a7PJFzJJkq0YecRudu/f3tpXbNe/F84sd9PhOSqcrRbm72WzglyEE8PuS1kuWpEz8G+Y5/0tQxnoh6khj9mgflrdCFuvpdutFLH4eN5MFDh Generated-by-Sahara\n", "id": "e172d86c-906d-418e-a29c-6189f53bfa42", "cluster_template_id": 
"57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", "node_groups": [ { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.maximum-allocation-mb": 2048, "yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048 }, "MapReduce": { "yarn.app.mapreduce.am.resource.mb": 256, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "mapreduce.reduce.memory.mb": 512, "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m" } }, "auto_security_group": false, "availability_zone": null, "count": 1, "flavor_id": "2", "id": "0fe07f2a-0275-4bc0-93b2-c3c1e48e2815", "security_groups": null, "use_autoconfig": true, "instances": [ { "created_at": "2015-09-14T10:57:36", "id": "4867d92e-cc7b-4cde-9a1a-149e91caa491", "management_ip": "172.18.168.115", "updated_at": "2015-09-14T10:57:39", "instance_id": "b9f16a07-88fc-423e-83a3-489598fe6737", "internal_ip": "10.50.0.60", "instance_name": "vanilla-cluster-master-0" } ], "volumes_availability_zone": null, "created_at": "2015-09-14T10:57:11", "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "updated_at": "2015-09-14T10:57:36", "volumes_per_node": 0, "is_proxy_gateway": false, "name": "master", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null }, { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.maximum-allocation-mb": 2048, "yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048 }, "MapReduce": { "yarn.app.mapreduce.am.resource.mb": 256, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "mapreduce.reduce.memory.mb": 512, "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m" } }, "auto_security_group": false, "availability_zone": null, "count": 3, "flavor_id": "2", "id": "c7a3bea4-c898-446b-8c67-6d378d4c06c4", "security_groups": null, "use_autoconfig": true, "instances": [ { "created_at": "2015-09-14T10:57:37", "id": "f3633b30-c1e4-4144-930b-ab5b780b87be", "management_ip": "172.18.168.118", "updated_at": "2015-09-14T10:57:39", "instance_id": "0cf1ee81-aa72-48da-be2c-65bc2fa51f8f", "internal_ip": "10.50.0.63", "instance_name": "vanilla-cluster-worker-0" }, { "created_at": "2015-09-14T10:57:37", "id": "0d66fd93-f277-4a94-b46a-f5866aa0c38f", "management_ip": "172.18.168.117", "updated_at": "2015-09-14T10:57:40", "instance_id": "4a937391-b594-4ad0-9a53-00a99a691383", "internal_ip": "10.50.0.62", "instance_name": "vanilla-cluster-worker-1" }, { "created_at": "2015-09-14T10:57:37", "id": "0982cefd-5c58-436e-8f1e-c1d0830f18a7", "management_ip": "172.18.168.116", "updated_at": "2015-09-14T10:57:40", "instance_id": "839b1d56-6d0d-4aa4-9d05-30e029c276f8", "internal_ip": "10.50.0.61", "instance_name": "vanilla-cluster-worker-2" } ], "volumes_availability_zone": null, "created_at": "2015-09-14T10:57:11", "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", "updated_at": "2015-09-14T10:57:37", "volumes_per_node": 0, "is_proxy_gateway": false, "name": "worker", "volume_mount_prefix": "/volumes/disk", "node_processes": [ 
"datanode", "nodemanager" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null } ], "provision_progress": [ { "created_at": "2015-09-14T10:57:18", "tenant_id": "808d5032ea0446889097723bfc8e919d", "id": "0a6d95f9-30f4-4434-823a-a38a7999a5af", "step_type": "Engine: create cluster", "step_name": "Create Heat stack", "updated_at": "2015-09-14T10:57:38", "successful": true, "total": 1, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T10:58:16", "tenant_id": "808d5032ea0446889097723bfc8e919d", "id": "29f2b587-c34c-4871-9ed9-9235b411cd9a", "step_type": "Engine: create cluster", "step_name": "Configure instances", "updated_at": "2015-09-14T10:58:22", "successful": true, "total": 4, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T11:00:27", "tenant_id": "808d5032ea0446889097723bfc8e919d", "id": "36f1efde-90f9-41c1-b409-aa1cf9623e3e", "step_type": "Plugin: start cluster", "step_name": "Start the following process(es): Oozie", "updated_at": "2015-09-14T11:01:15", "successful": true, "total": 1, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T10:58:22", "tenant_id": "808d5032ea0446889097723bfc8e919d", "id": "602bcc27-3a2d-42c8-8aca-ebc475319c72", "step_type": "Plugin: configure cluster", "step_name": "Configure instances", "updated_at": "2015-09-14T10:59:21", "successful": true, "total": 4, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T10:59:21", "tenant_id": "808d5032ea0446889097723bfc8e919d", "id": "7e291df1-2d32-410d-ae89-33ab6f83cf17", "step_type": "Plugin: configure cluster", "step_name": "Configure topology data", "updated_at": "2015-09-14T10:59:37", "successful": true, "total": 1, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T11:00:01", "tenant_id": "808d5032ea0446889097723bfc8e919d", "id": "8ab7933c-ad61-4a4f-88db-23ce78ee10f6", "step_type": "Plugin: start cluster", "step_name": "Start the following process(es): DataNodes, NodeManagers", "updated_at": "2015-09-14T11:00:11", "successful": true, "total": 3, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T11:00:11", "tenant_id": "808d5032ea0446889097723bfc8e919d", "id": "9c8dc016-8c5b-4e80-9857-80c41f6bd971", "step_type": "Plugin: start cluster", "step_name": "Await DataNodes start up", "updated_at": "2015-09-14T11:00:21", "successful": true, "total": 1, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T11:00:21", "tenant_id": "808d5032ea0446889097723bfc8e919d", "id": "c6327532-222b-416c-858f-73dbb32b8e97", "step_type": "Plugin: start cluster", "step_name": "Start the following process(es): HistoryServer", "updated_at": "2015-09-14T11:00:27", "successful": true, "total": 1, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T10:57:41", "tenant_id": "808d5032ea0446889097723bfc8e919d", "id": "d3eca726-8b44-473a-ac29-fba45a893725", "step_type": "Engine: create cluster", "step_name": "Wait for instance accessibility", "updated_at": "2015-09-14T10:58:14", "successful": true, "total": 4, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T10:58:14", "tenant_id": "808d5032ea0446889097723bfc8e919d", "id": "d7a875ff-64bf-41aa-882d-b5061c8ee152", "step_type": "Engine: create cluster", "step_name": "Mount volumes to instances", "updated_at": "2015-09-14T10:58:15", "successful": true, "total": 0, "cluster_id": 
"e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T10:59:55", "tenant_id": "808d5032ea0446889097723bfc8e919d", "id": "ded7d227-10b8-4cb0-ab6c-25da1462bb7a", "step_type": "Plugin: start cluster", "step_name": "Start the following process(es): ResourceManager", "updated_at": "2015-09-14T11:00:00", "successful": true, "total": 1, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T10:59:38", "tenant_id": "808d5032ea0446889097723bfc8e919d", "id": "e1701ff5-930a-4212-945a-43515dfe24d1", "step_type": "Plugin: start cluster", "step_name": "Start the following process(es): NameNode", "updated_at": "2015-09-14T10:59:54", "successful": true, "total": 1, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T10:57:38", "tenant_id": "808d5032ea0446889097723bfc8e919d", "id": "eaf0ab1b-bf8f-48f0-8f2c-fa4f82f539b9", "step_type": "Engine: create cluster", "step_name": "Assign IPs", "updated_at": "2015-09-14T10:57:41", "successful": true, "total": 4, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" } ], "hadoop_version": "2.7.1", "use_autoconfig": true, "trust_id": null, "description": null, "created_at": "2015-09-14T10:57:11", "is_protected": false, "updated_at": "2015-09-14T11:01:15", "is_transient": false, "cluster_configs": { "HDFS": { "dfs.replication": 3 } }, "anti_affinity": [], "name": "vanilla-cluster", "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", "status": "Active" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/clusters/multiple-clusters-create-request.json0000664000175000017500000000056200000000000031222 0ustar00zuulzuul00000000000000{ "plugin_name": "vanilla", "hadoop_version": "2.6.0", "cluster_template_id": "9951f86d-57ba-43d6-9cb0-14ed2ec7a6cf", "default_image_id": "bc3c3d3c-2684-4bf8-a9fa-388fb71288a9", "user_keypair_id": "test", "name": "def-cluster", "count": 2, "cluster_configs": {}, "neutron_management_network": "7e31648b-4b2e-4f32-9b0a-113581c27076" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/clusters/multiple-clusters-create-response.json0000664000175000017500000000017300000000000031366 0ustar00zuulzuul00000000000000{ "clusters": [ "a007a3e7-658f-4568-b0f2-fe2fd5efc554", "b012a6et-65hf-4566-b0f2-fe3fd7efc567" ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.617891 sahara-16.0.0/api-ref/source/v1.1/samples/data-sources/0000775000175000017500000000000000000000000022426 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/data-sources/data-source-register-hdfs-request.json0000664000175000017500000000022700000000000031763 0ustar00zuulzuul00000000000000{ "description": "This is hdfs input", "url": "hdfs://test-master-node:8020/user/hadoop/input", "type": "hdfs", "name": "hdfs_input" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/data-sources/data-source-register-hdfs-response.json0000664000175000017500000000067400000000000032137 0ustar00zuulzuul00000000000000{ "data_source": { "is_public": false, "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", "is_protected": false, "created_at": "2015-03-26 
11:09:36.148464", "id": "d7fffe9c-3b42-46a9-8be8-e98f586fa7a9", "updated_at": null, "name": "hdfs_input", "description": "This is hdfs input", "url": "hdfs://test-master-node:8020/user/hadoop/input", "type": "hdfs" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/data-sources/data-source-register-swift-request.json0000664000175000017500000000031700000000000032173 0ustar00zuulzuul00000000000000{ "description": "This is input", "url": "swift://container/text", "credentials": { "password": "swordfish", "user": "dev" }, "type": "swift", "name": "swift_input" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/data-sources/data-source-register-swift-response.json0000664000175000017500000000064100000000000032341 0ustar00zuulzuul00000000000000{ "data_source": { "is_public": false, "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", "is_protected": false, "created_at": "2015-03-26 11:18:10.691493", "id": "953831f2-0852-49d8-ac71-af5805e25256", "updated_at": null, "name": "swift_input", "description": "This is input", "url": "swift://container/text", "type": "swift" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/data-sources/data-source-show-response.json0000664000175000017500000000064100000000000030343 0ustar00zuulzuul00000000000000{ "data_source": { "is_public": false, "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", "is_protected": false, "created_at": "2015-03-26 11:18:10.691493", "id": "953831f2-0852-49d8-ac71-af5805e25256", "updated_at": null, "name": "swift_input", "description": "This is input", "url": "swift://container/text", "type": "swift" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/data-sources/data-source-update-request.json0000664000175000017500000000011000000000000030466 0ustar00zuulzuul00000000000000{ "description": "This is public input", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/data-sources/data-source-update-response.json0000664000175000017500000000067700000000000030656 0ustar00zuulzuul00000000000000{ "data_source": { "is_public": true, "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", "is_protected": false, "created_at": "2015-09-15 12:32:24.847493", "id": "953831f2-0852-49d8-ac71-af5805e25256", "updated_at": "2015-09-15 12:34:42.597435", "name": "swift_input", "description": "This is public input", "url": "swift://container/text", "type": "swift" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/data-sources/data-sources-list-response.json0000664000175000017500000000165200000000000030524 0ustar00zuulzuul00000000000000{ "data_sources": [ { "is_public": false, "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", "is_protected": false, "created_at": "2015-03-26 11:18:10", "id": "953831f2-0852-49d8-ac71-af5805e25256", "name": "swift_input", "updated_at": null, "description": "This is input", "url": "swift://container/text", "type": "swift" }, { "is_public": false, "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", "is_protected": false, 
"created_at": "2015-03-26 11:09:36", "id": "d7fffe9c-3b42-46a9-8be8-e98f586fa7a9", "name": "hdfs_input", "updated_at": null, "description": "This is hdfs input", "url": "hdfs://test-master-node:8020/user/hadoop/input", "type": "hdfs" } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.617891 sahara-16.0.0/api-ref/source/v1.1/samples/event-log/0000775000175000017500000000000000000000000021734 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/event-log/cluster-progress-response.json0000664000175000017500000000612700000000000030014 0ustar00zuulzuul00000000000000{ "status": "Error", "neutron_management_network": "7e31648b-4b2e-4f32-9b0a-113581c27076", "is_transient": false, "description": "", "user_keypair_id": "vgridnev", "updated_at": "2015-03-31 14:10:59", "plugin_name": "spark", "provision_progress": [ { "successful": false, "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-03-31 14:10:20", "step_type": "Engine: create cluster", "updated_at": "2015-03-31 14:10:35", "events": [ { "instance_name": "sample-worker-spark-004", "successful": false, "created_at": "2015-03-31 14:10:35", "updated_at": null, "event_info": "Node sample-worker-spark-004 has error status\nError ID: 3e238c82-d1f5-4560-8ed8-691e923e16a0", "instance_id": "b5ba5ba8-e9c1-47f7-9355-3ce0ec0e449d", "node_group_id": "145cf2fb-dcdf-42af-a4b9-a4047d2919d4", "step_id": "3f243c67-2c27-47c7-a0c0-0834ad17f8b6", "id": "34afcfc7-bdb0-43cb-b142-283d560dc6ad" }, { "instance_name": "sample-worker-spark-001", "successful": true, "created_at": "2015-03-31 14:10:35", "updated_at": null, "event_info": null, "instance_id": "c532ab71-38da-475a-95f8-f8eb93b8f1c2", "node_group_id": "145cf2fb-dcdf-42af-a4b9-a4047d2919d4", "step_id": "3f243c67-2c27-47c7-a0c0-0834ad17f8b6", "id": "4ba50414-5216-4161-bc7a-12716122b99d" } ], "cluster_id": "c26ec982-ba6b-4d75-818c-a50240164af0", "step_name": "Wait for instances to become active", "total": 5, "id": "3f243c67-2c27-47c7-a0c0-0834ad17f8b6" }, { "successful": true, "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-03-31 14:10:12", "step_type": "Engine: create cluster", "updated_at": "2015-03-31 14:10:19", "events": [], "cluster_id": "c26ec982-ba6b-4d75-818c-a50240164af0", "step_name": "Run instances", "total": 5, "id": "407ba50a-c799-46af-9dfb-6aa5f6ade426" } ], "anti_affinity": [], "node_groups": [], "management_public_key": "Sahara", "status_description": "Creating cluster failed for the following reason(s): Node sample-worker-spark-004 has error status\nError ID: 3e238c82-d1f5-4560-8ed8-691e923e16a0", "hadoop_version": "1.0.0", "id": "c26ec982-ba6b-4d75-1f8c-a50240164af0", "trust_id": null, "info": {}, "cluster_template_id": "5a9a09a3-9349-43bd-9058-16c401fad2d5", "name": "sample", "cluster_configs": {}, "created_at": "2015-03-31 14:10:07", "default_image_id": "e6a6c5da-67be-4017-a7d2-81f466efe67e", "tenant_id": "9cd1314a0a31493282b6712b76a8fcda" } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.621891 sahara-16.0.0/api-ref/source/v1.1/samples/image-registry/0000775000175000017500000000000000000000000022764 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 
sahara-16.0.0/api-ref/source/v1.1/samples/image-registry/image-register-request.json0000664000175000017500000000012100000000000030243 0ustar00zuulzuul00000000000000{ "username": "ubuntu", "description": "Ubuntu image for Hadoop 2.7.1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/image-registry/image-register-response.json0000664000175000017500000000134100000000000030416 0ustar00zuulzuul00000000000000{ "image": { "updated": "2015-03-24T10:05:10Z", "metadata": { "_sahara_description": "Ubuntu image for Hadoop 2.7.1", "_sahara_username": "ubuntu", "_sahara_tag_vanilla": true, "_sahara_tag_2.7.1": true }, "id": "bb8d12b5-f9bb-49f0-aecb-739b8a9bec89", "minDisk": 0, "status": "ACTIVE", "tags": [ "vanilla", "2.7.1" ], "minRam": 0, "progress": 100, "username": "ubuntu", "created": "2015-02-03T10:28:39Z", "name": "sahara-vanilla-2.7.1-ubuntu-14.04", "description": "Ubuntu image for Hadoop 2.7.1", "OS-EXT-IMG-SIZE:size": 1101856768 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/image-registry/image-show-response.json0000664000175000017500000000120200000000000027546 0ustar00zuulzuul00000000000000{ "image": { "updated": "2015-02-03T10:29:32Z", "metadata": { "_sahara_username": "ubuntu", "_sahara_tag_vanilla": true, "_sahara_tag_2.6.0": true }, "id": "bb8d12b5-f9bb-49f0-aecb-739b8a9bec89", "minDisk": 0, "status": "ACTIVE", "tags": [ "vanilla", "2.6.0" ], "minRam": 0, "progress": 100, "username": "ubuntu", "created": "2015-02-03T10:28:39Z", "name": "sahara-vanilla-2.6.0-ubuntu-14.04", "description": null, "OS-EXT-IMG-SIZE:size": 1101856768 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/image-registry/image-tags-add-request.json0000664000175000017500000000012500000000000030107 0ustar00zuulzuul00000000000000{ "tags": [ "vanilla", "2.7.1", "some_other_tag" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/image-registry/image-tags-add-response.json0000664000175000017500000000145700000000000030266 0ustar00zuulzuul00000000000000{ "image": { "updated": "2015-03-24T10:18:33Z", "metadata": { "_sahara_tag_vanilla": true, "_sahara_description": "Ubuntu image for Hadoop 2.7.1", "_sahara_username": "ubuntu", "_sahara_tag_some_other_tag": true, "_sahara_tag_2.7.1": true }, "id": "bb8d12b5-f9bb-49f0-aecb-739b8a9bec89", "minDisk": 0, "status": "ACTIVE", "tags": [ "vanilla", "some_other_tag", "2.7.1" ], "minRam": 0, "progress": 100, "username": "ubuntu", "created": "2015-02-03T10:28:39Z", "name": "sahara-vanilla-2.6.0-ubuntu-14.04", "description": "Ubuntu image for Hadoop 2.7.1", "OS-EXT-IMG-SIZE:size": 1101856768 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/image-registry/image-tags-delete-request.json0000664000175000017500000000006100000000000030620 0ustar00zuulzuul00000000000000{ "tags": [ "some_other_tag" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/image-registry/image-tags-delete-response.json0000664000175000017500000000134100000000000030770 0ustar00zuulzuul00000000000000{ "image": 
{ "updated": "2015-03-24T10:19:28Z", "metadata": { "_sahara_description": "Ubuntu image for Hadoop 2.7.1", "_sahara_username": "ubuntu", "_sahara_tag_vanilla": true, "_sahara_tag_2.7.1": true }, "id": "bb8d12b5-f9bb-49f0-aecb-739b8a9bec89", "minDisk": 0, "status": "ACTIVE", "tags": [ "vanilla", "2.7.1" ], "minRam": 0, "progress": 100, "username": "ubuntu", "created": "2015-02-03T10:28:39Z", "name": "sahara-vanilla-2.7.1-ubuntu-14.04", "description": "Ubuntu image for Hadoop 2.7.1", "OS-EXT-IMG-SIZE:size": 1101856768 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/image-registry/images-list-response.json0000664000175000017500000000261000000000000027730 0ustar00zuulzuul00000000000000{ "images": [ { "name": "ubuntu-vanilla-2.7.1", "id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", "created": "2015-08-06T08:17:14Z", "metadata": { "_sahara_tag_2.7.1": true, "_sahara_username": "ubuntu", "_sahara_tag_vanilla": true }, "username": "ubuntu", "progress": 100, "OS-EXT-IMG-SIZE:size": 998716928, "status": "ACTIVE", "minDisk": 0, "tags": [ "vanilla", "2.7.1" ], "updated": "2015-09-04T09:35:09Z", "minRam": 0, "description": null }, { "name": "cdh-latest", "id": "ff74035b-9da7-4edf-981d-57f270ed337d", "created": "2015-09-04T11:56:44Z", "metadata": { "_sahara_username": "ubuntu", "_sahara_tag_5.4.0": true, "_sahara_tag_cdh": true }, "username": "ubuntu", "progress": 100, "OS-EXT-IMG-SIZE:size": 3281453056, "status": "ACTIVE", "minDisk": 0, "tags": [ "5.4.0", "cdh" ], "updated": "2015-09-04T12:46:42Z", "minRam": 0, "description": null } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.621891 sahara-16.0.0/api-ref/source/v1.1/samples/job-binaries/0000775000175000017500000000000000000000000022400 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/job-binaries/create-request.json0000664000175000017500000000031400000000000026222 0ustar00zuulzuul00000000000000{ "url": "swift://container/jar-example.jar", "name": "jar-example.jar", "description": "This is a job binary", "extra": { "password": "swordfish", "user": "admin" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/job-binaries/create-response.json0000664000175000017500000000063500000000000026376 0ustar00zuulzuul00000000000000{ "job_binary": { "is_public": false, "description": "This is a job binary", "url": "swift://container/jar-example.jar", "tenant_id": "11587919cc534bcbb1027a161c82cf58", "created_at": "2013-10-15 14:49:20.106452", "id": "07f86352-ee8a-4b08-b737-d705ded5ff9c", "updated_at": null, "name": "jar-example.jar", "is_protected": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/job-binaries/list-response.json0000664000175000017500000000243500000000000026106 0ustar00zuulzuul00000000000000{ "binaries": [ { "is_public": false, "description": "", "url": "internal-db://d2498cbf-4589-484a-a814-81436c18beb3", "tenant_id": "11587919cc534bcbb1027a161c82cf58", "created_at": "2013-10-15 12:36:59.375060", "updated_at": null, "id": "84248975-3c82-4206-a58d-6e7fb3a563fd", "name": "example.pig", "is_protected": false }, { "is_public": false, "description": "", 
"url": "internal-db://22f1d87a-23c8-483e-a0dd-cb4a16dde5f9", "tenant_id": "11587919cc534bcbb1027a161c82cf58", "created_at": "2013-10-15 12:43:52.265899", "updated_at": null, "id": "508fc62d-1d58-4412-b603-bdab307bb926", "name": "udf.jar", "is_protected": false }, { "is_public": false, "description": "", "url": "swift://container/jar-example.jar", "tenant_id": "11587919cc534bcbb1027a161c82cf58", "created_at": "2013-10-15 14:25:04.970513", "updated_at": null, "id": "a716a9cd-9add-4b12-b1b6-cdb71aaef350", "name": "jar-example.jar", "is_protected": false } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/job-binaries/show-data-response0000664000175000017500000000024000000000000026042 0ustar00zuulzuul00000000000000A = load '$INPUT' using PigStorage(':') as (fruit: chararray); B = foreach A generate com.hadoopbook.pig.Trim(fruit); store B into '$OUTPUT' USING PigStorage();././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/job-binaries/show-response.json0000664000175000017500000000063400000000000026112 0ustar00zuulzuul00000000000000{ "job_binary": { "is_public": false, "description": "an example jar file", "url": "swift://container/jar-example.jar", "tenant_id": "11587919cc534bcbb1027a161c82cf58", "created_at": "2013-10-15 14:25:04.970513", "updated_at": null, "id": "a716a9cd-9add-4b12-b1b6-cdb71aaef350", "name": "jar-example.jar", "is_protected": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/job-binaries/update-request.json0000664000175000017500000000021100000000000026235 0ustar00zuulzuul00000000000000{ "url": "swift://container/new-jar-example.jar", "name": "new-jar-example.jar", "description": "This is a new job binary" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/job-binaries/update-response.json0000664000175000017500000000065100000000000026413 0ustar00zuulzuul00000000000000{ "job_binary": { "is_public": false, "description": "This is a new job binary", "url": "swift://container/new-jar-example.jar", "tenant_id": "11587919cc534bcbb1027a161c82cf58", "created_at": "2015-09-15 12:42:51.421542", "updated_at": null, "id": "b713d7ad-4add-4f12-g1b6-cdg71aaef350", "name": "new-jar-example.jar", "is_protected": false } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.621891 sahara-16.0.0/api-ref/source/v1.1/samples/job-binary-internals/0000775000175000017500000000000000000000000024065 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/job-binary-internals/create-response.json0000664000175000017500000000052700000000000030063 0ustar00zuulzuul00000000000000{ "job_binary_internal": { "is_public": false, "name": "script.pig", "tenant_id": "11587919cc534bcbb1027a161c82cf58", "created_at": "2013-10-15 13:17:35.994466", "updated_at": null, "datasize": 160, "id": "4833dc4b-8682-4d5b-8a9f-2036b47a0996", "is_protected": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 
sahara-16.0.0/api-ref/source/v1.1/samples/job-binary-internals/list-response.json0000664000175000017500000000134400000000000027571 0ustar00zuulzuul00000000000000{ "binaries": [ { "is_public": false, "name": "example.pig", "tenant_id": "11587919cc534bcbb1027a161c82cf58", "created_at": "2013-10-15 12:36:59.329034", "updated_at": null, "datasize": 161, "id": "d2498cbf-4589-484a-a814-81436c18beb3", "is_protected": false }, { "is_public": false, "name": "udf.jar", "tenant_id": "11587919cc534bcbb1027a161c82cf58", "created_at": "2013-10-15 12:43:52.008620", "updated_at": null, "datasize": 3745, "id": "22f1d87a-23c8-483e-a0dd-cb4a16dde5f9", "is_protected": false } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/job-binary-internals/show-data-response0000664000175000017500000000023700000000000027535 0ustar00zuulzuul00000000000000A = load '$INPUT' using PigStorage(':') as (fruit: chararray); B = foreach A generate com.hadoopbook.pig.Trim(fruit); store B into '$OUTPUT' USING PigStorage()././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/job-binary-internals/show-response.json0000664000175000017500000000052700000000000027600 0ustar00zuulzuul00000000000000{ "job_binary_internal": { "is_public": false, "name": "script.pig", "tenant_id": "11587919cc534bcbb1027a161c82cf58", "created_at": "2013-10-15 13:17:35.994466", "updated_at": null, "datasize": 160, "id": "4833dc4b-8682-4d5b-8a9f-2036b47a0996", "is_protected": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/job-binary-internals/update-request.json0000664000175000017500000000006400000000000027730 0ustar00zuulzuul00000000000000{ "name": "public-jbi", "is_public": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/job-binary-internals/update-response.json0000664000175000017500000000055600000000000030104 0ustar00zuulzuul00000000000000{ "job_binary_internal": { "is_public": true, "name": "public-jbi", "tenant_id": "11587919cc534bcbb1027a161c82cf58", "created_at": "2015-09-15 13:21:54.485912", "updated_at": "2015-09-15 13:24:24.590124", "datasize": 200, "id": "2433dc4b-8682-4d5b-8a9f-2036d47a0996", "is_protected": false } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.621891 sahara-16.0.0/api-ref/source/v1.1/samples/job-executions/0000775000175000017500000000000000000000000022772 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/job-executions/cancel-response.json0000664000175000017500000001347000000000000026753 0ustar00zuulzuul00000000000000{ "job_execution": { "job_configs": { "configs": { "mapred.reduce.tasks": "1", "mapred.map.tasks": "1" }, "args": [ "arg1", "arg2" ], "params": { "param2": "value2", "param1": "value1" } }, "is_protected": false, "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", "job_id": "310b0fc6-e1db-408e-8798-312e7500f3ac", "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", "created_at": "2015-09-15T09:49:24", "end_time": "2015-09-15T12:50:46", "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", "is_public": false, 
"updated_at": "2015-09-15T09:50:46", "return_code": null, "data_source_urls": { "3e1bc8e6-8c69-4749-8e52-90d9341d15bc": "swift://ap-cont/input", "52146b52-6540-4aac-a024-fee253cf52a9": "swift://ap-cont/output" }, "tenant_id": "808d5032ea0446889097723bfc8e919d", "start_time": "2015-09-15T12:49:43", "id": "20da9edb-12ce-4b45-a473-41baeefef997", "oozie_job_id": "0000001-150915094349962-oozie-hado-W", "info": { "user": "hadoop", "actions": [ { "name": ":start:", "trackerUri": "-", "externalStatus": "OK", "status": "OK", "externalId": "-", "transition": "job-node", "data": null, "endTime": "Tue, 15 Sep 2015 09:49:59 GMT", "errorCode": null, "id": "0000001-150915094349962-oozie-hado-W@:start:", "consoleUrl": "-", "errorMessage": null, "toString": "Action name[:start:] status[OK]", "stats": null, "type": ":START:", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "externalChildIDs": null, "cred": "null" }, { "name": "job-node", "trackerUri": "http://172.18.168.119:8032", "externalStatus": "FAILED/KILLED", "status": "ERROR", "externalId": "job_1442310173665_0002", "transition": "fail", "data": null, "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "errorCode": "JA018", "id": "0000001-150915094349962-oozie-hado-W@job-node", "consoleUrl": "http://ap-cluster-all-0:8088/proxy/application_1442310173665_0002/", "errorMessage": "Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]", "toString": "Action name[job-node] status[ERROR]", "stats": null, "type": "pig", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "externalChildIDs": null, "cred": "null" }, { "name": "fail", "trackerUri": "-", "externalStatus": "OK", "status": "OK", "externalId": "-", "transition": null, "data": null, "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "errorCode": "E0729", "id": "0000001-150915094349962-oozie-hado-W@fail", "consoleUrl": "-", "errorMessage": "Workflow failed, error message[Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]]", "toString": "Action name[fail] status[OK]", "stats": null, "type": ":KILL:", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:50:17 GMT", "externalChildIDs": null, "cred": "null" } ], "createdTime": "Tue, 15 Sep 2015 09:49:58 GMT", "status": "KILLED", "group": null, "externalId": null, "acl": null, "run": 0, "appName": "job-wf", "parentId": null, "conf": "\r\n \r\n user.name\r\n hadoop\r\n \r\n \r\n oozie.use.system.libpath\r\n true\r\n \r\n \r\n mapreduce.job.user.name\r\n hadoop\r\n \r\n \r\n nameNode\r\n hdfs://ap-cluster-all-0:9000\r\n \r\n \r\n jobTracker\r\n http://172.18.168.119:8032\r\n \r\n \r\n oozie.wf.application.path\r\n hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml\r\n \r\n", "id": "0000001-150915094349962-oozie-hado-W", "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "appPath": "hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml", "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "toString": "Workflow id[0000001-150915094349962-oozie-hado-W] status[KILLED]", "lastModTime": "Tue, 15 Sep 2015 09:50:17 GMT", "consoleUrl": "http://ap-cluster-all-0.novalocal:11000/oozie?job=0000001-150915094349962-oozie-hado-W" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/job-executions/job-ex-response.json0000664000175000017500000001347000000000000026712 0ustar00zuulzuul00000000000000{ "job_execution": { "job_configs": { "configs": { 
"mapred.reduce.tasks": "1", "mapred.map.tasks": "1" }, "args": [ "arg1", "arg2" ], "params": { "param2": "value2", "param1": "value1" } }, "is_protected": false, "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", "job_id": "310b0fc6-e1db-408e-8798-312e7500f3ac", "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", "created_at": "2015-09-15T09:49:24", "end_time": "2015-09-15T12:50:46", "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", "is_public": false, "updated_at": "2015-09-15T09:50:46", "return_code": null, "data_source_urls": { "3e1bc8e6-8c69-4749-8e52-90d9341d15bc": "swift://ap-cont/input", "52146b52-6540-4aac-a024-fee253cf52a9": "swift://ap-cont/output" }, "tenant_id": "808d5032ea0446889097723bfc8e919d", "start_time": "2015-09-15T12:49:43", "id": "20da9edb-12ce-4b45-a473-41baeefef997", "oozie_job_id": "0000001-150915094349962-oozie-hado-W", "info": { "user": "hadoop", "actions": [ { "name": ":start:", "trackerUri": "-", "externalStatus": "OK", "status": "OK", "externalId": "-", "transition": "job-node", "data": null, "endTime": "Tue, 15 Sep 2015 09:49:59 GMT", "errorCode": null, "id": "0000001-150915094349962-oozie-hado-W@:start:", "consoleUrl": "-", "errorMessage": null, "toString": "Action name[:start:] status[OK]", "stats": null, "type": ":START:", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "externalChildIDs": null, "cred": "null" }, { "name": "job-node", "trackerUri": "http://172.18.168.119:8032", "externalStatus": "FAILED/KILLED", "status": "ERROR", "externalId": "job_1442310173665_0002", "transition": "fail", "data": null, "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "errorCode": "JA018", "id": "0000001-150915094349962-oozie-hado-W@job-node", "consoleUrl": "http://ap-cluster-all-0:8088/proxy/application_1442310173665_0002/", "errorMessage": "Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]", "toString": "Action name[job-node] status[ERROR]", "stats": null, "type": "pig", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "externalChildIDs": null, "cred": "null" }, { "name": "fail", "trackerUri": "-", "externalStatus": "OK", "status": "OK", "externalId": "-", "transition": null, "data": null, "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "errorCode": "E0729", "id": "0000001-150915094349962-oozie-hado-W@fail", "consoleUrl": "-", "errorMessage": "Workflow failed, error message[Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]]", "toString": "Action name[fail] status[OK]", "stats": null, "type": ":KILL:", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:50:17 GMT", "externalChildIDs": null, "cred": "null" } ], "createdTime": "Tue, 15 Sep 2015 09:49:58 GMT", "status": "KILLED", "group": null, "externalId": null, "acl": null, "run": 0, "appName": "job-wf", "parentId": null, "conf": "\r\n \r\n user.name\r\n hadoop\r\n \r\n \r\n oozie.use.system.libpath\r\n true\r\n \r\n \r\n mapreduce.job.user.name\r\n hadoop\r\n \r\n \r\n nameNode\r\n hdfs://ap-cluster-all-0:9000\r\n \r\n \r\n jobTracker\r\n http://172.18.168.119:8032\r\n \r\n \r\n oozie.wf.application.path\r\n hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml\r\n \r\n", "id": "0000001-150915094349962-oozie-hado-W", "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "appPath": "hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml", "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "toString": "Workflow id[0000001-150915094349962-oozie-hado-W] status[KILLED]", "lastModTime": 
"Tue, 15 Sep 2015 09:50:17 GMT", "consoleUrl": "http://ap-cluster-all-0.novalocal:11000/oozie?job=0000001-150915094349962-oozie-hado-W" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/job-executions/job-ex-update-request.json0000664000175000017500000000003200000000000030012 0ustar00zuulzuul00000000000000{ "is_public": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/job-executions/job-ex-update-response.json0000664000175000017500000001346700000000000030200 0ustar00zuulzuul00000000000000{ "job_execution": { "job_configs": { "configs": { "mapred.reduce.tasks": "1", "mapred.map.tasks": "1" }, "args": [ "arg1", "arg2" ], "params": { "param2": "value2", "param1": "value1" } }, "is_protected": false, "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", "job_id": "310b0fc6-e1db-408e-8798-312e7500f3ac", "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", "created_at": "2015-09-15T09:49:24", "end_time": "2015-09-15T12:50:46", "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", "is_public": true, "updated_at": "2015-09-15T09:50:46", "return_code": null, "data_source_urls": { "3e1bc8e6-8c69-4749-8e52-90d9341d15bc": "swift://ap-cont/input", "52146b52-6540-4aac-a024-fee253cf52a9": "swift://ap-cont/output" }, "tenant_id": "808d5032ea0446889097723bfc8e919d", "start_time": "2015-09-15T12:49:43", "id": "20da9edb-12ce-4b45-a473-41baeefef997", "oozie_job_id": "0000001-150915094349962-oozie-hado-W", "info": { "user": "hadoop", "actions": [ { "name": ":start:", "trackerUri": "-", "externalStatus": "OK", "status": "OK", "externalId": "-", "transition": "job-node", "data": null, "endTime": "Tue, 15 Sep 2015 09:49:59 GMT", "errorCode": null, "id": "0000001-150915094349962-oozie-hado-W@:start:", "consoleUrl": "-", "errorMessage": null, "toString": "Action name[:start:] status[OK]", "stats": null, "type": ":START:", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "externalChildIDs": null, "cred": "null" }, { "name": "job-node", "trackerUri": "http://172.18.168.119:8032", "externalStatus": "FAILED/KILLED", "status": "ERROR", "externalId": "job_1442310173665_0002", "transition": "fail", "data": null, "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "errorCode": "JA018", "id": "0000001-150915094349962-oozie-hado-W@job-node", "consoleUrl": "http://ap-cluster-all-0:8088/proxy/application_1442310173665_0002/", "errorMessage": "Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]", "toString": "Action name[job-node] status[ERROR]", "stats": null, "type": "pig", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "externalChildIDs": null, "cred": "null" }, { "name": "fail", "trackerUri": "-", "externalStatus": "OK", "status": "OK", "externalId": "-", "transition": null, "data": null, "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "errorCode": "E0729", "id": "0000001-150915094349962-oozie-hado-W@fail", "consoleUrl": "-", "errorMessage": "Workflow failed, error message[Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]]", "toString": "Action name[fail] status[OK]", "stats": null, "type": ":KILL:", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:50:17 GMT", "externalChildIDs": null, "cred": "null" } ], "createdTime": "Tue, 15 Sep 2015 09:49:58 GMT", "status": "KILLED", "group": null, "externalId": null, "acl": null, "run": 0, "appName": "job-wf", "parentId": null, 
"conf": "\r\n \r\n user.name\r\n hadoop\r\n \r\n \r\n oozie.use.system.libpath\r\n true\r\n \r\n \r\n mapreduce.job.user.name\r\n hadoop\r\n \r\n \r\n nameNode\r\n hdfs://ap-cluster-all-0:9000\r\n \r\n \r\n jobTracker\r\n http://172.18.168.119:8032\r\n \r\n \r\n oozie.wf.application.path\r\n hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml\r\n \r\n", "id": "0000001-150915094349962-oozie-hado-W", "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "appPath": "hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml", "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "toString": "Workflow id[0000001-150915094349962-oozie-hado-W] status[KILLED]", "lastModTime": "Tue, 15 Sep 2015 09:50:17 GMT", "consoleUrl": "http://ap-cluster-all-0.novalocal:11000/oozie?job=0000001-150915094349962-oozie-hado-W" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/job-executions/list-response.json0000664000175000017500000001443500000000000026503 0ustar00zuulzuul00000000000000{ "job_executions": [ { "job_configs": { "configs": { "mapred.reduce.tasks": "1", "mapred.map.tasks": "1" }, "args": [ "arg1", "arg2" ], "params": { "param2": "value2", "param1": "value1" } }, "is_protected": false, "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", "job_id": "310b0fc6-e1db-408e-8798-312e7500f3ac", "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", "created_at": "2015-09-15T09:49:24", "end_time": "2015-09-15T12:50:46", "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", "is_public": false, "updated_at": "2015-09-15T09:50:46", "return_code": null, "data_source_urls": { "3e1bc8e6-8c69-4749-8e52-90d9341d15bc": "swift://ap-cont/input", "52146b52-6540-4aac-a024-fee253cf52a9": "swift://ap-cont/output" }, "tenant_id": "808d5032ea0446889097723bfc8e919d", "start_time": "2015-09-15T12:49:43", "id": "20da9edb-12ce-4b45-a473-41baeefef997", "oozie_job_id": "0000001-150915094349962-oozie-hado-W", "info": { "user": "hadoop", "actions": [ { "name": ":start:", "trackerUri": "-", "externalStatus": "OK", "status": "OK", "externalId": "-", "transition": "job-node", "data": null, "endTime": "Tue, 15 Sep 2015 09:49:59 GMT", "errorCode": null, "id": "0000001-150915094349962-oozie-hado-W@:start:", "consoleUrl": "-", "errorMessage": null, "toString": "Action name[:start:] status[OK]", "stats": null, "type": ":START:", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "externalChildIDs": null, "cred": "null" }, { "name": "job-node", "trackerUri": "http://172.18.168.119:8032", "externalStatus": "FAILED/KILLED", "status": "ERROR", "externalId": "job_1442310173665_0002", "transition": "fail", "data": null, "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "errorCode": "JA018", "id": "0000001-150915094349962-oozie-hado-W@job-node", "consoleUrl": "http://ap-cluster-all-0:8088/proxy/application_1442310173665_0002/", "errorMessage": "Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]", "toString": "Action name[job-node] status[ERROR]", "stats": null, "type": "pig", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "externalChildIDs": null, "cred": "null" }, { "name": "fail", "trackerUri": "-", "externalStatus": "OK", "status": "OK", "externalId": "-", "transition": null, "data": null, "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "errorCode": "E0729", "id": "0000001-150915094349962-oozie-hado-W@fail", "consoleUrl": "-", 
"errorMessage": "Workflow failed, error message[Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]]", "toString": "Action name[fail] status[OK]", "stats": null, "type": ":KILL:", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:50:17 GMT", "externalChildIDs": null, "cred": "null" } ], "createdTime": "Tue, 15 Sep 2015 09:49:58 GMT", "status": "KILLED", "group": null, "externalId": null, "acl": null, "run": 0, "appName": "job-wf", "parentId": null, "conf": "\r\n \r\n user.name\r\n hadoop\r\n \r\n \r\n oozie.use.system.libpath\r\n true\r\n \r\n \r\n mapreduce.job.user.name\r\n hadoop\r\n \r\n \r\n nameNode\r\n hdfs://ap-cluster-all-0:9000\r\n \r\n \r\n jobTracker\r\n http://172.18.168.119:8032\r\n \r\n \r\n oozie.wf.application.path\r\n hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml\r\n \r\n", "id": "0000001-150915094349962-oozie-hado-W", "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "appPath": "hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml", "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "toString": "Workflow id[0000001-150915094349962-oozie-hado-W] status[KILLED]", "lastModTime": "Tue, 15 Sep 2015 09:50:17 GMT", "consoleUrl": "http://ap-cluster-all-0.novalocal:11000/oozie?job=0000001-150915094349962-oozie-hado-W" } } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.621891 sahara-16.0.0/api-ref/source/v1.1/samples/job-types/0000775000175000017500000000000000000000000021750 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/job-types/job-types-list-response.json0000664000175000017500000002117400000000000027371 0ustar00zuulzuul00000000000000{ "job_types": [ { "plugins": [ { "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", "versions": { "1.2.1": {}, "2.6.0": {} }, "title": "Vanilla Apache Hadoop", "name": "vanilla" }, { "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", "versions": { "1.3.2": {}, "2.0.6": {} }, "title": "Hortonworks Data Platform", "name": "hdp" }, { "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", "versions": { "5": {}, "5.3.0": {} }, "title": "Cloudera Plugin", "name": "cdh" } ], "name": "Hive" }, { "plugins": [ { "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. 
It can also deploy the Oozie component.", "versions": { "1.2.1": {}, "2.6.0": {} }, "title": "Vanilla Apache Hadoop", "name": "vanilla" }, { "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", "versions": { "1.3.2": {}, "2.0.6": {} }, "title": "Hortonworks Data Platform", "name": "hdp" }, { "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", "versions": { "5": {}, "5.3.0": {} }, "title": "Cloudera Plugin", "name": "cdh" } ], "name": "Java" }, { "plugins": [ { "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", "versions": { "1.2.1": {}, "2.6.0": {} }, "title": "Vanilla Apache Hadoop", "name": "vanilla" }, { "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", "versions": { "1.3.2": {}, "2.0.6": {} }, "title": "Hortonworks Data Platform", "name": "hdp" }, { "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", "versions": { "5": {}, "5.3.0": {} }, "title": "Cloudera Plugin", "name": "cdh" } ], "name": "MapReduce" }, { "plugins": [ { "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", "versions": { "1.2.1": {}, "2.6.0": {} }, "title": "Vanilla Apache Hadoop", "name": "vanilla" }, { "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", "versions": { "1.3.2": {}, "2.0.6": {} }, "title": "Hortonworks Data Platform", "name": "hdp" }, { "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", "versions": { "5": {}, "5.3.0": {} }, "title": "Cloudera Plugin", "name": "cdh" } ], "name": "MapReduce.Streaming" }, { "plugins": [ { "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", "versions": { "1.2.1": {}, "2.6.0": {} }, "title": "Vanilla Apache Hadoop", "name": "vanilla" }, { "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", "versions": { "1.3.2": {}, "2.0.6": {} }, "title": "Hortonworks Data Platform", "name": "hdp" }, { "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", "versions": { "5": {}, "5.3.0": {} }, "title": "Cloudera Plugin", "name": "cdh" } ], "name": "Pig" }, { "plugins": [ { "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. 
It can also deploy the Oozie component.", "versions": { "1.2.1": {}, "2.6.0": {} }, "title": "Vanilla Apache Hadoop", "name": "vanilla" }, { "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", "versions": { "1.3.2": {}, "2.0.6": {} }, "title": "Hortonworks Data Platform", "name": "hdp" }, { "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", "versions": { "5": {}, "5.3.0": {} }, "title": "Cloudera Plugin", "name": "cdh" } ], "name": "Shell" }, { "plugins": [ { "description": "This plugin provides an ability to launch Spark on Hadoop CDH cluster without any management consoles.", "versions": { "1.0.0": {} }, "title": "Apache Spark", "name": "spark" } ], "name": "Spark" } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.625891 sahara-16.0.0/api-ref/source/v1.1/samples/jobs/0000775000175000017500000000000000000000000020771 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/jobs/job-create-request.json0000664000175000017500000000035400000000000025367 0ustar00zuulzuul00000000000000{ "description": "This is pig job example", "mains": [ "90d9d5ec-11aa-48bd-bc8c-34936ce0db6e" ], "libs": [ "320a2ca7-25fd-4b48-9bc3-4fb1b6c4ff27" ], "type": "Pig", "name": "pig-job-example" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/jobs/job-create-response.json0000664000175000017500000000227700000000000025543 0ustar00zuulzuul00000000000000{ "job": { "is_public": false, "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-03-27 08:48:38.630827", "id": "71defc8f-d005-484f-9d86-1aedf644d1ef", "name": "pig-job-example", "description": "This is pig job example", "interface": [], "libs": [ { "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-10 14:25:53", "id": "320a2ca7-25fd-4b48-9bc3-4fb1b6c4ff27", "name": "binary-job", "updated_at": null, "description": "", "url": "internal-db://c6a925fa-ac1d-4b2e-b88a-7054e1927521" } ], "type": "Pig", "is_protected": false, "mains": [ { "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-03 10:47:51", "id": "90d9d5ec-11aa-48bd-bc8c-34936ce0db6e", "name": "pig", "updated_at": null, "description": "", "url": "internal-db://872878f6-72ea-44db-8d1d-e6a6396d2df0" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/jobs/job-execute-request.json0000664000175000017500000000072700000000000025572 0ustar00zuulzuul00000000000000{ "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", "job_configs": { "configs": { "mapred.map.tasks": "1", "mapred.reduce.tasks": "1" }, "args": [ "arg1", "arg2" ], "params": { "param2": "value2", "param1": "value1" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/jobs/job-execute-response.json0000664000175000017500000000160600000000000025735 0ustar00zuulzuul00000000000000{ "job_execution": { "input_id": 
"3e1bc8e6-8c69-4749-8e52-90d9341d15bc", "is_protected": false, "job_id": "310b0fc6-e1db-408e-8798-312e7500f3ac", "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", "created_at": "2015-09-15T09:49:24", "is_public": false, "id": "20da9edb-12ce-4b45-a473-41baeefef997", "tenant_id": "808d5032ea0446889097723bfc8e919d", "job_configs": { "configs": { "mapred.reduce.tasks": "1", "mapred.map.tasks": "1" }, "args": [ "arg1", "arg2" ], "params": { "param2": "value2", "param1": "value1" } }, "info": { "status": "PENDING" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/jobs/job-show-response.json0000664000175000017500000000146600000000000025257 0ustar00zuulzuul00000000000000{ "job": { "is_public": false, "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-10 14:25:48", "id": "1a674c31-9aaa-4d07-b844-2bf200a1b836", "name": "Edp-test-job", "updated_at": null, "description": "", "interface": [], "libs": [ { "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-10 14:25:48", "id": "0ff4ac10-94a4-4e25-9ac9-603afe27b100", "name": "binary-job.jar", "updated_at": null, "description": "", "url": "swift://Edp-test-c71e6bce.sahara/binary-job.jar" } ], "type": "MapReduce", "mains": [], "is_protected": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/jobs/job-update-request.json0000664000175000017500000000013600000000000025404 0ustar00zuulzuul00000000000000{ "description": "This is public pig job example", "name": "public-pig-job-example" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/jobs/job-update-response.json0000664000175000017500000000153600000000000025557 0ustar00zuulzuul00000000000000{ "job": { "is_public": false, "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-10 14:25:48", "id": "1a674c31-9aaa-4d07-b844-2bf200a1b836", "name": "public-pig-job-example", "updated_at": null, "description": "This is public pig job example", "interface": [], "libs": [ { "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-10 14:25:48", "id": "0ff4ac10-94a4-4e25-9ac9-603afe27b100", "name": "binary-job.jar", "updated_at": null, "description": "", "url": "swift://Edp-test-c71e6bce.sahara/binary-job.jar" } ], "type": "MapReduce", "mains": [], "is_protected": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/jobs/jobs-list-response.json0000664000175000017500000000462100000000000025431 0ustar00zuulzuul00000000000000{ "jobs": [ { "is_public": false, "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-10 14:25:48", "id": "1a674c31-9aaa-4d07-b844-2bf200a1b836", "name": "Edp-test-job-3d60854e", "updated_at": null, "description": "", "interface": [], "libs": [ { "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-10 14:25:48", "id": "0ff4ac10-94a4-4e25-9ac9-603afe27b100", "name": "binary-job-339c2d1a.jar", "updated_at": null, "description": "", "url": "swift://Edp-test-c71e6bce.sahara/binary-job-339c2d1a.jar" } ], "type": "MapReduce", "mains": [], "is_protected": false }, { "is_public": false, "tenant_id": 
"9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-10 14:25:44", "id": "4d1f3759-3497-4927-8352-910bacf24e62", "name": "Edp-test-job-6b6953c8", "updated_at": null, "description": "", "interface": [], "libs": [ { "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-10 14:25:44", "id": "e0d47800-4ac1-4d63-a2e1-c92d669a44e2", "name": "binary-job-6f21a2f8.jar", "updated_at": null, "description": "", "url": "swift://Edp-test-b409ec68.sahara/binary-job-6f21a2f8.jar" } ], "type": "Pig", "mains": [ { "tenant_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-10 14:25:44", "id": "e073e896-f123-4b76-995f-901d786262df", "name": "binary-job-d4f8bd75.pig", "updated_at": null, "description": "", "url": "swift://Edp-test-b409ec68.sahara/binary-job-d4f8bd75.pig" } ], "is_protected": false } ], "markers": { "prev": null, "next": "c53832da-6e7b-449e-a166-9f9ce1718d03" } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.625891 sahara-16.0.0/api-ref/source/v1.1/samples/node-group-templates/0000775000175000017500000000000000000000000024107 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=sahara-16.0.0/api-ref/source/v1.1/samples/node-group-templates/node-group-template-create-request.json 22 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/node-group-templates/node-group-template-create-request.js0000664000175000017500000000044300000000000033265 0ustar00zuulzuul00000000000000{ "plugin_name": "vanilla", "hadoop_version": "2.7.1", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "name": "master", "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "flavor_id": "2" } ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=sahara-16.0.0/api-ref/source/v1.1/samples/node-group-templates/node-group-template-create-response.json 22 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/node-group-templates/node-group-template-create-response.j0000664000175000017500000000201400000000000033244 0ustar00zuulzuul00000000000000{ "node_group_template": { "is_public": false, "tenant_id": "808d5032ea0446889097723bfc8e919d", "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "is_default": false, "availability_zone": null, "plugin_name": "vanilla", "is_protected": false, "flavor_id": "2", "id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "hadoop_version": "2.7.1", "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:20:11", "security_groups": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "master", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null } } ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=sahara-16.0.0/api-ref/source/v1.1/samples/node-group-templates/node-group-template-show-response.json 22 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/node-group-templates/node-group-template-show-response.jso0000664000175000017500000000216700000000000033334 0ustar00zuulzuul00000000000000{ "node_group_template": { "is_public": false, "image_id": null, "tenant_id": "808d5032ea0446889097723bfc8e919d", "shares": null, "floating_ip_pool": 
"033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "is_default": false, "availability_zone": null, "plugin_name": "vanilla", "flavor_id": "2", "id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "description": null, "hadoop_version": "2.7.1", "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:20:11", "is_protected": false, "updated_at": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "master", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "volumes_size": 0, "volume_local_to_instance": false, "security_groups": null, "volume_type": null } } ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=sahara-16.0.0/api-ref/source/v1.1/samples/node-group-templates/node-group-template-update-request.json 22 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/node-group-templates/node-group-template-update-request.js0000664000175000017500000000033300000000000033302 0ustar00zuulzuul00000000000000{ "plugin_name": "vanilla", "hadoop_version": "2.7.1", "node_processes": [ "datanode" ], "name": "new", "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "flavor_id": "2" } ././@PaxHeader0000000000000000000000000000020700000000000011454 xustar0000000000000000113 path=sahara-16.0.0/api-ref/source/v1.1/samples/node-group-templates/node-group-template-update-response.json 22 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/node-group-templates/node-group-template-update-response.j0000664000175000017500000000167000000000000033272 0ustar00zuulzuul00000000000000{ "node_group_template": { "is_public": false, "tenant_id": "808d5032ea0446889097723bfc8e919d", "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "is_default": false, "availability_zone": null, "plugin_name": "vanilla", "is_protected": false, "flavor_id": "2", "id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "hadoop_version": "2.7.1", "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:20:11", "security_groups": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "new", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "datanode" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null } } ././@PaxHeader0000000000000000000000000000020600000000000011453 xustar0000000000000000112 path=sahara-16.0.0/api-ref/source/v1.1/samples/node-group-templates/node-group-templates-list-response.json 22 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/node-group-templates/node-group-templates-list-response.js0000664000175000017500000000510000000000000033321 0ustar00zuulzuul00000000000000{ "node_group_templates": [ { "is_public": false, "image_id": null, "tenant_id": "808d5032ea0446889097723bfc8e919d", "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "is_default": false, "availability_zone": null, "plugin_name": "vanilla", "flavor_id": "2", "id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "description": null, "hadoop_version": "2.7.1", "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:20:11", "is_protected": false, "updated_at": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "master", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "resourcemanager", 
"oozie", "historyserver" ], "volumes_size": 0, "volume_local_to_instance": false, "security_groups": null, "volume_type": null }, { "is_public": false, "image_id": null, "tenant_id": "808d5032ea0446889097723bfc8e919d", "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "is_default": false, "availability_zone": null, "plugin_name": "vanilla", "flavor_id": "2", "id": "846edb31-add5-46e6-a4ee-a4c339f99251", "description": null, "hadoop_version": "2.7.1", "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:27:00", "is_protected": false, "updated_at": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "worker", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "datanode", "nodemanager" ], "volumes_size": 0, "volume_local_to_instance": false, "security_groups": null, "volume_type": null } ], "markers": { "prev":"39dfc852-8588-4b61-8d2b-eb08a67ab240", "next":"eaa0bd97-ab54-43df-83ab-77a9774d7358" } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.625891 sahara-16.0.0/api-ref/source/v1.1/samples/plugins/0000775000175000017500000000000000000000000021515 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/plugins/plugin-show-response.json0000664000175000017500000000060000000000000026514 0ustar00zuulzuul00000000000000{ "plugin": { "name": "vanilla", "versions": [ "1.2.1", "2.4.1", "2.6.0" ], "title": "Vanilla Apache Hadoop", "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component." } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/plugins/plugin-update-request.json0000664000175000017500000000013400000000000026652 0ustar00zuulzuul00000000000000{ "plugin_labels": { "enabled": { "status": false } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/plugins/plugin-update-response.json0000664000175000017500000000172200000000000027024 0ustar00zuulzuul00000000000000{ "plugin": { "plugin_labels": { "hidden": { "status": true, "mutable": true, "description": "Existence of plugin or its version is hidden, but still can be used for cluster creation by CLI and directly by client." }, "enabled": { "status": false, "mutable": true, "description": "Plugin or its version is enabled and can be used by user." } }, "description": "It's a fake plugin that aimed to work on the CirrOS images. It doesn't install Hadoop. It's needed to be able to test provisioning part of Sahara codebase itself.", "versions": [ "0.1" ], "tenant_id": "993f53c1f51845e48e013aeb632358d8", "title": "Fake Plugin", "version_labels": { "0.1": { "enabled": { "status": true, "mutable": true, "description": "Plugin or its version is enabled and can be used by user." 
} } }, "name": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/plugins/plugin-version-show-response.json0000664000175000017500000000552700000000000030214 0ustar00zuulzuul00000000000000{ "plugin": { "name": "vanilla", "versions": [ "1.2.1", "2.4.1", "2.6.0" ], "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", "required_image_tags": [ "vanilla", "2.6.0" ], "node_processes": { "JobFlow": [ "oozie" ], "HDFS": [ "namenode", "datanode", "secondarynamenode" ], "YARN": [ "resourcemanager", "nodemanager" ], "MapReduce": [ "historyserver" ], "Hadoop": [], "Hive": [ "hiveserver" ] }, "configs": [ { "default_value": "/tmp/hadoop-${user.name}", "name": "hadoop.tmp.dir", "priority": 2, "config_type": "string", "applicable_target": "HDFS", "is_optional": true, "scope": "node", "description": "A base for other temporary directories." }, { "default_value": true, "name": "hadoop.native.lib", "priority": 2, "config_type": "bool", "applicable_target": "HDFS", "is_optional": true, "scope": "node", "description": "Should native hadoop libraries, if present, be used." }, { "default_value": 1024, "name": "NodeManager Heap Size", "config_values": null, "priority": 1, "config_type": "int", "applicable_target": "YARN", "is_optional": false, "scope": "node", "description": null }, { "default_value": true, "name": "Enable Swift", "config_values": null, "priority": 1, "config_type": "bool", "applicable_target": "general", "is_optional": false, "scope": "cluster", "description": null }, { "default_value": true, "name": "Enable MySQL", "config_values": null, "priority": 1, "config_type": "bool", "applicable_target": "general", "is_optional": true, "scope": "cluster", "description": null } ], "title": "Vanilla Apache Hadoop" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v1.1/samples/plugins/plugins-list-response.json0000664000175000017500000000261700000000000026704 0ustar00zuulzuul00000000000000{ "plugins": [ { "name": "vanilla", "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. 
It can also deploy the Oozie component.", "versions": [ "1.2.1", "2.4.1", "2.6.0" ], "title": "Vanilla Apache Hadoop" }, { "name": "hdp", "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", "versions": [ "1.3.2", "2.0.6" ], "title": "Hortonworks Data Platform" }, { "name": "spark", "description": "This plugin provides an ability to launch Spark on Hadoop CDH cluster without any management consoles.", "versions": [ "1.0.0", "0.9.1" ], "title": "Apache Spark" }, { "name": "cdh", "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", "versions": [ "5", "5.3.0" ], "title": "Cloudera Plugin" } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.629891 sahara-16.0.0/api-ref/source/v2/0000775000175000017500000000000000000000000016232 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/cluster-templates.inc0000664000175000017500000001134600000000000022407 0ustar00zuulzuul00000000000000.. -*- rst -*- ================= Cluster templates ================= A cluster template configures a cluster. A cluster template lists node groups with the number of instances in each group. You can also define cluster-scoped configurations in a cluster template. Show cluster template details ============================= .. rest_method:: GET /v2/cluster-templates/{cluster_template_id} Shows details for a cluster template. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - cluster_template_id: url_cluster_template_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: cluster_template_description - use_autoconfig: use_autoconfig - cluster_configs: cluster_configs - created_at: created_at - default_image_id: default_image_id - updated_at: updated_at - plugin_name: plugin_name - is_default: is_default - is_protected: object_is_protected - shares: object_shares - domain_name: domain_name - project_id: project_id - node_groups: node_groups - is_public: object_is_public - plugin_version: plugin_version - id: cluster_template_id - name: cluster_template_name Response Example ---------------- .. literalinclude:: samples/cluster-templates/cluster-templates-list-response.json :language: javascript Update cluster templates ======================== .. rest_method:: PATCH /v2/cluster-templates/{cluster_template_id} Updates a cluster template. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - cluster_template_id: cluster_template_id Request Example --------------- .. literalinclude:: samples/cluster-templates/cluster-template-update-request.json :language: javascript Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - description: cluster_template_description - use_autoconfig: use_autoconfig - cluster_configs: cluster_configs - created_at: created_at - default_image_id: default_image_id - updated_at: updated_at - plugin_name: plugin_name - is_default: is_default - is_protected: object_is_protected - shares: object_shares - domain_name: domain_name - project_id: project_id - node_groups: node_groups - is_public: object_is_public - plugin_version: plugin_version - id: cluster_template_id - name: cluster_template_name Delete cluster template ======================= .. rest_method:: DELETE /v2/cluster-templates/{cluster_template_id} Deletes a cluster template. Normal response codes:204 Request ------- .. rest_parameters:: parameters.yaml - cluster_template_id: cluster_template_id List cluster templates ====================== .. rest_method:: GET /v2/cluster-templates Lists available cluster templates. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - limit: limit - marker: marker - sort_by: sort_by_cluster_templates Response Parameters ------------------- .. rest_parameters:: parameters.yaml - markers: markers - prev: prev - next: next - description: cluster_template_description - use_autoconfig: use_autoconfig - cluster_configs: cluster_configs - created_at: created_at - default_image_id: default_image_id - updated_at: updated_at - plugin_name: plugin_name - is_default: is_default - is_protected: object_is_protected - shares: object_shares - domain_name: domain_name - project_id: project_id - node_groups: node_groups - is_public: object_is_public - plugin_version: plugin_version - id: cluster_template_id - name: cluster_template_name Response Example ---------------- .. rest_method:: GET /v2/cluster-templates?limit=2 .. literalinclude:: samples/cluster-templates/cluster-templates-list-response.json :language: javascript Create cluster templates ======================== .. rest_method:: POST /v2/cluster-templates Creates a cluster template. Normal response codes:202 Request Example --------------- .. literalinclude:: samples/cluster-templates/cluster-template-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: cluster_template_description - use_autoconfig: use_autoconfig - cluster_configs: cluster_configs - created_at: created_at - default_image_id: default_image_id - updated_at: updated_at - plugin_name: plugin_name - is_default: is_default - is_protected: object_is_protected - shares: object_shares - domain_name: domain_name - project_id: project_id - node_groups: node_groups - is_public: object_is_public - plugin_version: plugin_version - id: cluster_template_id - name: cluster_template_name ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/clusters.inc0000664000175000017500000001143000000000000020570 0ustar00zuulzuul00000000000000.. -*- rst -*- ======== Clusters ======== A cluster is a group of nodes with the same configuration. List available clusters ======================= .. rest_method:: GET /v2/clusters Lists available clusters. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - limit: limit - marker: marker - sort_by: sort_by_clusters Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - markers: markers - prev: prev - next: next - count: count - info: info - cluster_template_id: cluster_template_id - is_transient: is_transient - provision_progress: provision_progress - status: status - neutron_management_network: neutron_management_network - clusters: clusters - management_public_key: management_public_key - status_description: status_description - trust_id: trust_id - domain_name: domain_name Response Example ---------------- .. rest_method:: GET /v2/clusters .. literalinclude:: samples/clusters/clusters-list-response.json :language: javascript Create cluster ============== .. rest_method:: POST /v2/clusters Creates a cluster. Normal response codes: 202 Request Example --------------- .. literalinclude:: samples/clusters/cluster-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - count: count - info: info - cluster_template_id: cluster_template_id - is_transient: is_transient - provision_progress: provision_progress - status: status - neutron_management_network: neutron_management_network - management_public_key: management_public_key - status_description: status_description - trust_id: trust_id - domain_name: domain_name Show details of a cluster ========================= .. rest_method:: GET /v2/clusters/{cluster_id} Shows details for a cluster, by ID. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - cluster_id: url_cluster_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - count: count - info: info - cluster_template_id: cluster_template_id - is_transient: is_transient - provision_progress: provision_progress - status: status - neutron_management_network: neutron_management_network - management_public_key: management_public_key - status_description: status_description - trust_id: trust_id - domain_name: domain_name Response Example ---------------- .. literalinclude:: samples/clusters/cluster-show-response.json :language: javascript Delete a cluster ================ .. rest_method:: DELETE /v2/clusters/{cluster_id} Deletes a cluster. Normal response codes: 204 or 200 Request ------- .. rest_parameters:: parameters.yaml - cluster_id: url_cluster_id - force: force Scale cluster ============= .. rest_method:: PUT /v2/clusters/{cluster_id} Scales a cluster. Normal response codes: 202 Request ------- .. rest_parameters:: parameters.yaml - cluster_id: cluster_id Request Example --------------- .. literalinclude:: samples/clusters/cluster-scale-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - count: count - info: info - cluster_template_id: cluster_template_id - is_transient: is_transient - provision_progress: provision_progress - status: status - neutron_management_network: neutron_management_network - management_public_key: management_public_key - status_description: status_description - trust_id: trust_id - domain_name: domain_name Update cluster ============== .. rest_method:: PATCH /v2/clusters/{cluster_id} Updates a cluster. Normal response codes: 202 Request ------- .. rest_parameters:: parameters.yaml - cluster_id: url_cluster_id Request Example --------------- .. literalinclude:: samples/clusters/cluster-update-request.json :language: javascript Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - count: count - info: info - cluster_template_id: cluster_template_id - is_transient: is_transient - provision_progress: provision_progress - status: status - neutron_management_network: neutron_management_network - management_public_key: management_public_key - status_description: status_description - trust_id: trust_id - domain_name: domain_name Show progress ============= .. rest_method:: GET /v2/clusters/{cluster_id} Shows provisioning progress for a cluster. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - cluster_id: url_cluster_id Response Example ---------------- .. literalinclude:: samples/event-log/cluster-progress-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/data-sources.inc0000664000175000017500000000613500000000000021324 0ustar00zuulzuul00000000000000.. -*- rst -*- ============ Data sources ============ A data source object defines the location of input or output for MapReduce jobs and might reference different types of storage. The Data Processing service does not validate data source locations. Show data source details ======================== .. rest_method:: GET /v2/data-sources/{data_source_id} Shows details for a data source. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - data_source_id: url_data_source_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: data_source_description - url: url - project_id: project_id - created_at: created_at - updated_at: updated_at - is_protected: object_is_protected - is_public: object_is_public - type: type - id: data_source_id - name: data_source_name Response Example ---------------- .. literalinclude:: samples/data-sources/data-source-show-response.json :language: javascript Delete data source ================== .. rest_method:: DELETE /v2/data-sources/{data_source_id} Deletes a data source. Normal response codes:204 Request ------- .. rest_parameters:: parameters.yaml - data_source_id: url_data_source_id Update data source ================== .. rest_method:: PATCH /v2/data-sources/{data_source_id} Updates a data source. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - data_source_id: url_data_source_id Request Example --------------- .. literalinclude:: samples/data-sources/data-source-update-request.json :language: javascript List data sources ================= .. rest_method:: GET /v2/data-sources Lists all data sources. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - limit: limit - marker: marker - sort_by: sort_by_data_sources Response Parameters ------------------- .. rest_parameters:: parameters.yaml - markers: markers - prev: prev - next: next - description: data_source_description - url: url - project_id: project_id - created_at: created_at - updated_at: updated_at - is_protected: object_is_protected - is_public: object_is_public - type: type - id: data_source_id - name: data_source_name Response Example ---------------- .. rest_method:: GET /v2/data-sourses?sort_by=-name .. literalinclude:: samples/data-sources/data-sources-list-response.json :language: javascript Create data source ================== .. rest_method:: POST /v2/data-sources Creates a data source. Normal response codes:202 Request Example --------------- .. 
literalinclude:: samples/data-sources/data-source-register-hdfs-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: data_source_description - url: url - project_id: project_id - created_at: created_at - updated_at: updated_at - is_protected: object_is_protected - is_public: object_is_public - type: type - id: data_source_id - name: data_source_name ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/event-log.inc0000664000175000017500000000110700000000000020624 0ustar00zuulzuul00000000000000.. -*- rst -*- ========= Event log ========= The event log feature provides information about cluster provisioning. In the event of errors, the event log shows the reason for the failure. Show progress ============= .. rest_method:: GET /v2/clusters/{cluster_id} Shows provisioning progress of cluster. Normal response codes: 200 Error response codes: Request ------- .. rest_parameters:: parameters.yaml - cluster_id: cluster_id Response Example ---------------- .. literalinclude:: samples/event-log/cluster-progress-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/image-registry.inc0000664000175000017500000000710300000000000021656 0ustar00zuulzuul00000000000000.. -*- rst -*- ============== Image registry ============== Use the image registry tool to manage images, add tags to and remove tags from images, and define the user name for an instance operating system. Each plugin lists required tags for an image. To run remote operations, the Data Processing service requires a user name with which to log in to the operating system for an instance. Add tags to image ================= .. rest_method:: PUT /v2/images/{image_id}/tags Adds tags to an image. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - tags: tags - image_id: url_image_id Request Example --------------- .. literalinclude:: samples/image-registry/image-tags-add-request.json :language: javascript Show image details ================== .. rest_method:: GET /v2/images/{image_id} Shows details for an image. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - image_id: url_image_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status - username: username - updated: updated - description: image_description - created: created - image: image - tags: tags - minDisk: minDisk - name: image_name - progress: progress - minRam: minRam - id: image_id - metadata: metadata Response Example ---------------- .. literalinclude:: samples/image-registry/image-show-response.json :language: javascript Register image ============== .. rest_method:: POST /v2/images/{image_id} Registers an image in the registry. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - username: username - description: image_description - image_id: url_image_id Request Example --------------- .. literalinclude:: samples/image-registry/image-register-request.json :language: javascript Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - status: status - username: username - updated: updated - description: image_description - created: created - image: image - tags: tags - minDisk: minDisk - name: image_name - progress: progress - minRam: minRam - id: image_id - metadata: metadata Unregister image ================ .. rest_method:: DELETE /v2/images/{image_id} Removes an image from the registry. Normal response codes:204 Request ------- .. rest_parameters:: parameters.yaml - image_id: url_image_id Remove tags from image ====================== .. rest_method:: DELETE /v2/images/{image_id}/tag Removes tags from an image. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - tags: tags - image_id: url_image_id Request Example --------------- .. literalinclude:: samples/image-registry/image-tags-delete-request.json :language: javascript List images =========== .. rest_method:: GET /v2/images Lists all images registered in the registry. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - tags: tags Response Parameters ------------------- .. rest_parameters:: parameters.yaml - status: status - username: username - updated: updated - description: image_description - created: created - image: image - tags: tags - minDisk: minDisk - name: image_name - images: images - progress: progress - minRam: minRam - id: image_id - metadata: metadata Response Example ---------------- .. literalinclude:: samples/image-registry/images-list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/index.rst0000664000175000017500000000065600000000000020102 0ustar00zuulzuul00000000000000:tocdepth: 3 ---------------------- Data Processing API v2 ---------------------- .. rest_expand_all:: .. include:: cluster-templates.inc .. include:: clusters.inc .. include:: data-sources.inc .. include:: event-log.inc .. include:: image-registry.inc .. include:: job-binaries.inc .. include:: job-templates.inc .. include:: job-types.inc .. include:: jobs.inc .. include:: node-group-templates.inc .. include:: plugins.inc ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/job-binaries.inc0000664000175000017500000000756500000000000021306 0ustar00zuulzuul00000000000000.. -*- rst -*- ============ Job binaries ============ Job binary objects represent data processing applications and libraries that are stored in Object Storage service(S3 or Swift) or in Manila Shares. List job binaries ================= .. rest_method:: GET /v2/job-binaries Lists the available job binaries. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - limit: limit - marker: marker - sort_by: sort_by_job_binary Response Parameters ------------------- .. rest_parameters:: parameters.yaml - markers: markers - prev: prev - next: next - description: job_binary_description - url: url - project_id: project_id - created_at: created_at - updated_at: updated_at - is_protected: object_is_protected - is_public: object_is_public - binaries: binaries - id: job_binary_id - name: job_binary_name Response Example ---------------- .. rest_method:: GET /v2/job-binaries?sort_by=created_at .. literalinclude:: samples/job-binaries/list-response.json :language: javascript Create job binary ================= .. rest_method:: POST /v2/job-binaries Creates a job binary. 
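As an illustration only (this paragraph and sketch are not part of the upstream api-ref sources), the two job-binary operations documented here, ``GET /v2/job-binaries`` and ``POST /v2/job-binaries``, can be exercised with any HTTP client. The sketch below uses the Python ``requests`` library; ``SAHARA_URL``, the token, and the request body fields are assumptions standing in for deployment-specific values, and the canonical create body is the one referenced in ``samples/job-binaries/create-request.json``.

.. code-block:: python

   # Illustrative sketch only: list and create job binaries over the v2 API.
   # SAHARA_URL and TOKEN are assumed, deployment-specific values.
   import requests

   SAHARA_URL = "http://controller:8386/v2"   # assumed data-processing endpoint
   TOKEN = "<keystone-token>"                 # assumed pre-obtained auth token
   HEADERS = {"X-Auth-Token": TOKEN, "Content-Type": "application/json"}

   # List job binaries (GET /v2/job-binaries), sorted as in the example above.
   resp = requests.get(f"{SAHARA_URL}/job-binaries",
                       params={"sort_by": "created_at"}, headers=HEADERS)
   resp.raise_for_status()
   for binary in resp.json().get("binaries", []):
       print(binary["id"], binary["name"])

   # Create a job binary (POST /v2/job-binaries) with an illustrative body;
   # see samples/job-binaries/create-request.json for the canonical fields.
   body = {"name": "binary-job.jar",
           "url": "swift://container.sahara/binary-job.jar",
           "description": "example binary"}
   created = requests.post(f"{SAHARA_URL}/job-binaries",
                           json=body, headers=HEADERS)
   print(created.status_code)   # the reference documents 202 on success
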
Normal response codes:202 Request Example --------------- .. literalinclude:: samples/job-binaries/create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: job_binary_description - url: url - project_id: project_id - created_at: created_at - updated_at: updated_at - is_protected: object_is_protected - is_public: object_is_public - id: job_binary_id - name: job_binary_name Show job binary details ======================= .. rest_method:: GET /v2/job-binaries/{job_binary_id} Shows details for a job binary. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - job_binary_id: url_job_binary_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: job_binary_description - url: url - project_id: project_id - created_at: created_at - updated_at: updated_at - is_protected: object_is_protected - is_public: object_is_public - id: job_binary_id - name: job_binary_name Response Example ---------------- .. literalinclude:: samples/job-binaries/show-response.json :language: javascript Delete job binary ================= .. rest_method:: DELETE /v2/job-binaries/{job_binary_id} Deletes a job binary. Normal response codes:204 Request ------- .. rest_parameters:: parameters.yaml - job_binary_id: url_job_binary_id Update job binary ================= .. rest_method:: PATCH /v2/job-binaries/{job_binary_id} Updates a job binary. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - job_binary_id: url_job_binary_id Request Example --------------- .. literalinclude:: samples/job-binaries/update-request.json :language: javascript Show job binary data ==================== .. rest_method:: GET /v2/job-binaries/{job_binary_id}/data Shows data for a job binary. The response body shows the job binary raw data and the response headers show the data length. Example response: :: HTTP/1.1 200 OK Connection: keep-alive Content-Length: 161 Content-Type: text/html; charset=utf-8 Date: Sat, 28 Mar 2016 02:42:48 GMT A = load '$INPUT' using PigStorage(':') as (fruit: chararray); B = foreach A generate com.hadoopbook.pig.Trim(fruit); store B into '$OUTPUT' USING PigStorage(); Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - job_binary_id: url_job_binary_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - Content-Length: Content-Length Response Example ---------------- .. literalinclude:: samples/job-binaries/show-data-response :language: text ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/job-templates.inc0000664000175000017500000001011200000000000021466 0ustar00zuulzuul00000000000000.. -*- rst -*- ============= Job templates ============= A job templates object lists the binaries that a job needs to run. To run a job, you must specify data sources and job parameters. You can run a job on an existing or new transient cluster. List job templates ================== .. rest_method:: GET /v2/job-templates Lists all job templates. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - limit: limit - marker: marker - sort_by: sort_by_job_templates Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - job_templates: job_templates - description: job_description - project_id: project_id - created_at: created_at - mains: mains - updated_at: updated_at - libs: libs - is_protected: object_is_protected - interface: interface - is_public: object_is_public - type: type - id: job_template_id - name: job_template_name - markers: markers - prev: prev - next: next Response Example ---------------- ..rest_method:: GET /v2/job-templates?limit=2 .. literalinclude:: samples/job-templates/job-templates-list-response.json :language: javascript Create job template =================== .. rest_method:: POST /v2/job-templates Creates a job object. Normal response codes:202 Request Example --------------- .. literalinclude:: samples/job-templates/job-template-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: job_description - project_id: project_id - created_at: created_at - mains: mains - updated_at: updated_at - libs: libs - is_protected: object_is_protected - interface: interface - is_public: object_is_public - type: type - id: job_template_id - name: job_template_name Show job template details ========================= .. rest_method:: GET /v2/job-templates/{job_template_id} Shows details for a job template. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - job_template_id: url_job_template_id Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: job_description - project_id: project_id - created_at: created_at - mains: mains - updated_at: updated_at - libs: libs - is_protected: object_is_protected - interface: interface - is_public: object_is_public - type: type - id: job_template_id - name: job_template_name Response Example ---------------- .. literalinclude:: samples/job-templates/job-template-show-response.json :language: javascript Remove job template =================== .. rest_method:: DELETE /v2/job-templates/{job_template_id} Removes a job. Normal response codes:204 Request ------- .. rest_parameters:: parameters.yaml - job_template_id: url_job_template_id Update job template object ========================== .. rest_method:: PATCH /v2/job-templates/{job_template_id} Updates a job template object. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - job_template_id: url_job_template_id Request Example --------------- .. literalinclude:: samples/job-templates/job-template-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - description: job_description - project_id: project_id - created_at: created_at - mains: mains - updated_at: updated_at - libs: libs - is_protected: object_is_protected - interface: interface - is_public: object_is_public - type: type - id: job_template_id - name: job_template_name Get job template config hints ============================= .. rest_method:: GET /v2/job-templates/config-hints/{job_type} Get job template config hints Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - job_type: url_job_type Response Parameters ------------------- .. rest_parameters:: parameters.yaml - job_config: job_config - args: args - configs: configs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/job-types.inc0000664000175000017500000000201200000000000020634 0ustar00zuulzuul00000000000000.. 
-*- rst -*- ========= Job types ========= Each plugin that supports EDP also supports specific job types. Different versions of a plugin might actually support different job types. Configuration options vary by plugin, version, and job type. The job types provide information about which plugins support which job types and how to configure the job types. List job types ============== .. rest_method:: GET /v2/job-types Lists all job types. You can use query parameters to filter the response. Normal response codes: 200 Error response codes: Request ------- .. rest_parameters:: parameters.yaml - plugin: plugin - version: version - type: type - hints: hints Response Parameters ------------------- .. rest_parameters:: parameters.yaml - versions: versions - title: title - description: description_plugin - job_types: job_types - name: plugin_name Response Example ---------------- .. literalinclude:: samples/job-types/job-types-list-response.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/jobs.inc0000664000175000017500000001014100000000000017657 0ustar00zuulzuul00000000000000.. -*- rst -*- ==== Jobs ==== A job object represents a job that runs on a cluster. A job polls the status of a running job and reports it to the user. Execute Job =========== .. rest_method:: POST /v2/jobs Executes a job. Normal response codes: 200 Request Example ---------------- .. rest_method:: /v2/jobs .. literalinclude:: samples/jobs/job-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - info: info - output_id: output_id - start_time: start_time - job_template_id: job_template_id - updated_at: updated_at - project_id: project_id - created_at: created_at - args: args - data_source_urls: data_source_urls - return_code: return_code - oozie_job_id: oozie_job_id - is_protected: is_protected_3 - cluster_id: cluster_id - end_time: end_time - params: params - is_public: job_is_public - input_id: input_id - configs: configs - job: job - id: job_id Response Example ---------------- .. literalinclude:: samples/jobs/job-response.json :language: javascript List jobs ========= .. rest_method:: GET /v2/jobs Lists available jobs. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - limit: limit - marker: marker - sort_by: sort_by_job Response Parameters ------------------- .. rest_parameters:: parameters.yaml - markers: markers - prev: prev - next: next - info: info - output_id: output_id - start_time: start_time - job_template_id: job_template_id - updated_at: updated_at - project_id: project_id - created_at: created_at - args: args - data_source_urls: data_source_urls - return_code: return_code - oozie_job_id: oozie_job_id - is_protected: is_protected_3 - cluster_id: cluster_id - end_time: end_time - params: params - is_public: job_is_public - input_id: input_id - configs: configs - job: job - id: job_id - jobs: jobs Response Example ---------------- .. rest_method:: /v2/jobs .. literalinclude:: samples/jobs/list-response.json :language: javascript Show job ======== .. rest_method:: GET /v2/jobs/{job_id} Shows details for a job, by ID. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - job_id: url_job_id Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - info: info - output_id: output_id - start_time: start_time - job_template_id: job_template_id - updated_at: updated_at - project_id: project_id - created_at: created_at - args: args - data_source_urls: data_source_urls - return_code: return_code - oozie_job_id: oozie_job_id - is_protected: is_protected_3 - cluster_id: cluster_id - end_time: end_time - params: params - is_public: job_is_public - input_id: input_id - configs: configs - job: job - id: job_id Response Example ---------------- .. literalinclude:: samples/jobs/job-response.json :language: javascript Delete job ========== .. rest_method:: DELETE /v2/jobs/{job_id} Deletes a job. Normal response codes:204 Request ------- .. rest_parameters:: parameters.yaml - job_id: url_job_id Update job ========== .. rest_method:: PATCH /v2/jobs/{job_id} Updates a job. Normal response codes:202 Request ------- .. rest_parameters:: parameters.yaml - job_id: url_job_id Request Example --------------- .. literalinclude:: samples/jobs/job-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - info: info - output_id: output_id - start_time: start_time - job_template_id: job_template_id - updated_at: updated_at - project_id: project_id - created_at: created_at - args: args - data_source_urls: data_source_urls - return_code: return_code - oozie_job_id: oozie_job_id - is_protected: is_protected_3 - cluster_id: cluster_id - end_time: end_time - params: params - is_public: job_is_public - input_id: input_id - configs: configs - job: job - id: job_id ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/node-group-templates.inc0000664000175000017500000001434400000000000023006 0ustar00zuulzuul00000000000000.. -*- rst -*- ==================== Node group templates ==================== A cluster is a group of nodes with the same configuration. A node group template configures a node in the cluster. A template configures Hadoop processes and VM characteristics, such as the number of reduced slots for task tracker, the number of CPUs, and the amount of RAM. The template specifies the VM characteristics through an OpenStack flavor. List node group templates ========================= .. rest_method:: GET /v2/node-group-templates Lists available node group templates. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - limit: limit - marker: marker - sort_by: sort_by_node_group_templates Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - markers: markers - prev: prev - next: next - volume_local_to_instance: volume_local_to_instance - availability_zone: availability_zone - updated_at: updated_at - use_autoconfig: use_autoconfig - volumes_per_node: volumes_per_node - id: node_group_template_id - security_groups: security_groups - shares: object_shares - node_configs: node_configs - auto_security_group: auto_security_group - volumes_availability_zone: volumes_availability_zone - description: node_group_template_description - volume_mount_prefix: volume_mount_prefix - plugin_name: plugin_name - floating_ip_pool: floating_ip_pool - is_default: is_default - image_id: image_id - volumes_size: volumes_size - is_proxy_gateway: is_proxy_gateway - is_public: object_is_public - plugin_version: plugin_version - name: node_group_template_name - project_id: project_id - created_at: created_at - volume_type: volume_type - is_protected: object_is_protected - node_processes: node_processes - flavor_id: flavor_id Response Example ---------------- .. rest_method:: GET /v2/node-group-templates?limit=2&marker=38b4e146-1d39-4822-bad2-fef1bf304a52&sort_by=name .. literalinclude:: samples/node-group-templates/node-group-templates-list-response.json :language: javascript Create node group template ========================== .. rest_method:: POST /v2/node-group-templates Creates a node group template. Normal response codes: 202 Request Example --------------- .. literalinclude:: samples/node-group-templates/node-group-template-create-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - volume_local_to_instance: volume_local_to_instance - availability_zone: availability_zone - updated_at: updated_at - use_autoconfig: use_autoconfig - volumes_per_node: volumes_per_node - id: node_group_template_id - security_groups: security_groups - shares: object_shares - node_configs: node_configs - auto_security_group: auto_security_group - volumes_availability_zone: volumes_availability_zone - description: node_group_template_description - volume_mount_prefix: volume_mount_prefix - plugin_name: plugin_name - floating_ip_pool: floating_ip_pool - is_default: is_default - image_id: image_id - volumes_size: volumes_size - is_proxy_gateway: is_proxy_gateway - is_public: object_is_public - plugin_version: plugin_version - name: node_group_template_name - project_id: project_id - created_at: created_at - volume_type: volume_type - is_protected: object_is_protected - node_processes: node_processes - flavor_id: flavor_id Show node group template details ================================ .. rest_method:: GET /v2/node-group-templates/{node_group_template_id} Shows a node group template, by ID. Normal response codes: 200 Request ------- .. rest_parameters:: parameters.yaml - node_group_template_id: url_node_group_template_id Response Parameters ------------------- .. 
rest_parameters:: parameters.yaml - volume_local_to_instance: volume_local_to_instance - availability_zone: availability_zone - updated_at: updated_at - use_autoconfig: use_autoconfig - volumes_per_node: volumes_per_node - id: node_group_template_id - security_groups: security_groups - shares: object_shares - node_configs: node_configs - auto_security_group: auto_security_group - volumes_availability_zone: volumes_availability_zone - description: node_group_template_description - volume_mount_prefix: volume_mount_prefix - plugin_name: plugin_name - floating_ip_pool: floating_ip_pool - is_default: is_default - image_id: image_id - volumes_size: volumes_size - is_proxy_gateway: is_proxy_gateway - is_public: object_is_public - plugin_version: plugin_version - name: node_group_template_name - project_id: project_id - created_at: created_at - volume_type: volume_type - is_protected: object_is_protected - node_processes: node_processes - flavor_id: flavor_id Response Example ---------------- .. literalinclude:: samples/node-group-templates/node-group-template-show-response.json :language: javascript Delete node group template ========================== .. rest_method:: DELETE /v2/node-group-templates/{node_group_template_id} Deletes a node group template. Normal response codes:204 Request ------- .. rest_parameters:: parameters.yaml - node_group_template_id: url_node_group_template_id Update node group template ========================== .. rest_method:: PATCH /v2/node-group-templates/{node_group_template_id} Updates a node group template. Normal respose codes:202 Request ------- .. rest_parameters:: parameters.yaml - node_group_template_id: url_node_group_template_id Request Example --------------- .. literalinclude:: samples/node-group-templates/node-group-template-update-request.json :language: javascript Export node group template ========================== .. rest_method:: GET /v2/node-group-templates/{node_group_template_id}/export Exports a node group template. Normal respose codes:202 Request ------- .. rest_parameters:: parameters.yaml - node_group_template_id: url_node_group_template_id Request Example --------------- .. literalinclude:: samples/node-group-templates/node-group-template-update-request.json :language: javascript ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/parameters.yaml0000664000175000017500000005743700000000000021301 0ustar00zuulzuul00000000000000# variables in header Content-Length: description: | The length of the data, in bytes. in: header required: true type: string # variables in path hints: description: | Includes configuration hints in the response. in: path required: false type: boolean job_binary_id: description: | The UUID of the job binary. in: path required: true type: string limit: description: | Maximum number of objects in response data. in: path required: false type: integer marker: description: | ID of the last element on the list which won't be in response. in: path required: false type: string plugin: description: | Filters the response by a plugin name. in: path required: false type: string sort_by_cluster_templates: description: | The field for sorting cluster templates. this parameter accepts the following values: ``name``, ``plugin_name``, ``plugin_version``, ``created_at``, ``updated_at``, ``id``. Also this values can started with ``-`` prefix for descending sort. For example: ``-name``. 
in: path required: false type: string sort_by_clusters: description: | The field for sorting clusters. this parameter accepts the following values: ``name``, ``plugin_name``, ``plugin_version``, ``status``, ``id``. Also this values can started with ``-`` prefix for descending sort. For example: ``-name``. in: path required: false type: string sort_by_data_sources: description: | The field for sorting data sources. this parameter accepts the following values: ``id``, ``name``, ``type``, ``created_at``, ``updated_at``. Also this values can started with ``-`` prefix for descending sort. For example: ``-name``. in: path required: false type: string sort_by_job: description: | The field for sorting job executions. this parameter accepts the following values: ``id``, ``job_template``, ``cluster``, ``status``. Also this values can started with ``-`` prefix for descending sort. For example: ``-cluster``. in: path required: false type: string sort_by_job_binary: description: | The field for sorting job binaries. this parameter accepts the following values: ``id``, ``name``, ``created_at``, ``updated_at``. Also this values can started with ``-`` prefix for descending sort. For example: ``-name``. in: path required: false type: string sort_by_job_binary_internals: description: | The field for sorting job binary internals. this parameter accepts the following values: ``id``, ``name``, ``created_at``, ``updated_at``. Also this values can started with ``-`` prefix for descending sort. For example: ``-name``. in: path required: false type: string sort_by_job_templates: description: | The field for sorting jobs. this parameter accepts the following values: ``id``, ``name``, ``type``, ``created_at``, ``updated_at``. Also this values can started with ``-`` prefix for descending sort. For example: ``-name``. in: path required: false type: string sort_by_node_group_templates: description: | The field for sorting node group templates. this parameter accepts the following values: ``name``, ``plugin_name``, ``plugin_version``, ``created_at``, ``updated_at``, ``id``. Also this values can started with ``-`` prefix for descending sort. For example: ``-name``. in: path required: false type: string type_2: description: | Filters the response by a job type. in: path required: false type: string url_cluster_id: description: | The ID of the cluster in: path required: true type: string url_cluster_template_id: description: | The unique identifier of the cluster template. in: path required: true type: string url_data_source_id: description: | The UUID of the data source. in: path required: true type: string url_image_id: description: | The UUID of the image. in: path required: true type: string url_job_binary_id: description: | The UUID of the job binary. in: path required: true type: string url_job_binary_internals_id: description: | The UUID of the job binary internal. in: path required: true type: string url_job_binary_internals_name: description: | The name of the job binary internal. in: path required: true type: string url_job_id: description: | The UUID of the job. in: path required: true type: string url_job_template_id: description: | The UUID of the template job. in: path required: true type: string url_job_type: description: | The job type. in: path required: true type: string url_node_group_template_id: description: | The UUID of the node group template. in: path required: true type: string url_plugin_name: description: | Name of the plugin. 
in: path required: true type: string url_project_id: description: | UUID of the project. in: path required: true type: string version: description: | Filters the response by a plugin version. in: path required: true type: string version_1: description: | Version of the plugin. in: path required: false type: string # variables in body args: description: | The list of arguments. in: body required: true type: array auto_security_group: description: | If set to ``True``, the cluster group is automatically secured. in: body required: true type: boolean availability_zone: description: | The availability of the node in the cluster. in: body required: true type: string binaries: description: | The list of job binary internal objects. in: body required: true type: array cluster_configs: description: | A set of key and value pairs that contain the cluster configuration. in: body required: true type: object cluster_id: description: | The UUID of the cluster. in: body required: true type: string cluster_template_description: description: | Description of the cluster template in: body required: false type: string cluster_template_id: description: | The UUID of the cluster template. in: body required: true type: string cluster_template_name: description: | The name of the cluster template. in: body required: true type: string clusters: description: | The list of clusters. in: body required: true type: array configs: description: | The mappings of the job tasks. in: body required: true type: object count: description: | The number of nodes in the cluster. in: body required: true type: integer created: description: | The date and time when the image was created. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. in: body required: true type: string created_at: description: | The date and time when the cluster was created. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string created_at_1: description: | The date and time when the object was created. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string created_at_2: description: | The date and time when the node was created in the cluster. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string created_at_3: description: | The date and time when the job execution object was created. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string data_source_description: description: | The description of the data source object. in: body required: true type: string data_source_id: description: | The UUID of the data source. in: body required: true type: string data_source_name: description: | The name of the data source. 
in: body required: true type: string data_source_urls: description: | The data source URLs. in: body required: true type: object datasize: description: | The size of the data stored in the internal database. in: body required: true type: integer default_image_id: description: | The default ID of the image. in: body required: true type: string description: description: | The description of the cluster. in: body required: true type: string description_3: description: | The description of the node in the cluster. in: body required: true type: string description_7: description: | Description of the image. in: body required: false type: string description_plugin: description: | The full description of the plugin. in: body required: true type: string domain_name: description: | Domain name for internal and external hostname resolution. Required if DNS service is enabled. in: body required: false type: string end_time: description: | The end date and time of the job execution. The date and time when the job completed execution. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string flavor_id: description: | The ID of the flavor. in: body required: true type: string floating_ip_pool: description: | The UUID of the pool in the template. in: body required: true type: string force: description: | If set to ``true``, Sahara will force cluster deletion. in: body required: false type: boolean id: description: | The UUID of the cluster. in: body required: true type: string id_1: description: | The ID of the object. in: body required: true type: string image: description: | A set of key and value pairs that contain image properties. in: body required: true type: object image_description: description: | The description of the image. in: body required: true type: string image_id: description: | The UUID of the image. in: body required: true type: string image_name: description: | The name of the operating system image. in: body required: true type: string images: description: | The list of images and their properties. in: body required: true type: array info: description: | A set of key and value pairs that contain cluster information. in: body required: true type: object info_1: description: | The report of the executed job objects. in: body required: true type: object input_id: description: | The UUID of the input. in: body required: true type: string interface: description: | The interfaces of the job object. in: body required: true type: array is_default: description: | If set to ``true``, the cluster is the default cluster. in: body required: true type: boolean is_protected: description: | If set to ``true``, the cluster is protected. in: body required: true type: boolean is_protected_2: description: | If set to ``true``, the node is protected. in: body required: true type: boolean is_protected_3: description: | If set to ``true``, the job execution object is protected. in: body required: true type: boolean is_proxy_gateway: description: | If set to ``true``, the node is the proxy gateway. in: body required: true type: boolean is_public: description: | If set to ``true``, the cluster is public. in: body required: true type: boolean is_transient: description: | If set to ``true``, the cluster is transient. in: body required: true type: boolean job: description: | A set of key and value pairs that contain the job object. 
in: body required: true type: object job_binary_description: description: | The description of the job binary object. in: body required: true type: string job_binary_internals_id: description: | The UUID of the job binary internal. in: body required: true type: string job_binary_internals_name: description: | The name of the job binary internal. in: body required: true type: string job_binary_name: description: | The name of the object. in: body required: true type: string job_config: description: | The job configuration. in: body required: true type: string job_description: description: | The description of the job object. in: body required: true type: string job_id: description: | The UUID of the job object. in: body required: true type: string job_is_public: description: | If set to ``true``, the job object is public. in: body required: true type: boolean job_name: description: | The name of the job object. in: body required: true type: string job_template_id: description: | The UUID of the job template object. in: body required: true type: string job_template_name: description: | The name of the job template object. in: body required: true type: string job_templates: description: | The list of the job templates. in: body required: true type: array job_types: description: | The list of plugins and their job types. in: body required: true type: array jobs: description: | The list of job objects. in: body required: true type: array libs: description: | The list of the job object properties. in: body required: true type: array mains: description: | The list of the job object and their properties. in: body required: true type: array management_public_key: description: | The SSH key for the management network. in: body required: true type: string markers: description: | The markers of previous and following pages of data. This field exists only if ``limit`` is passed to request. in: body required: false type: object metadata: description: | A set of key and value pairs that contain image metadata. in: body required: true type: object minDisk: description: | The minimum disk space, in GB. in: body required: true type: integer minRam: description: | The minimum amount of random access memory (RAM) for the image, in GB. in: body required: true type: integer name: description: | The name of the cluster. in: body required: true type: string name_1: description: | The name of the object. in: body required: true type: string neutron_management_network: description: | The UUID of the neutron management network. in: body required: true type: string next: description: | The marker of next page of list data. in: body required: false type: string node_configs: description: | A set of key and value pairs that contain the node configuration in the cluster. in: body required: true type: object node_group_template_description: description: | Description of the node group template in: body required: false type: string node_group_template_id: description: | The UUID of the node group template. in: body required: true type: string node_group_template_name: description: | The name of the node group template. in: body required: true type: string node_groups: description: | The detail properties of the node in key-value pairs. in: body required: true type: object node_processes: description: | The list of the processes performed by the node. in: body required: true type: array object_is_protected: description: | If set to ``true``, the object is protected. 
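The ``markers`` object and its ``next`` field described above are what a client follows to page through long listings when ``limit`` is passed. Below is a hedged sketch of that loop, assuming the same placeholder endpoint and token as before and that ``next`` is passed back as the ``marker`` of the following request; only the ``limit``/``marker`` parameters and the ``markers``/``next`` response fields come from this file.

.. code-block:: python

   import requests

   SAHARA_URL = "http://controller:8386/v2"        # assumed endpoint
   HEADERS = {"X-Auth-Token": "<keystone-token>"}  # assumed auth header

   def list_all(resource, page_size=20):
       """Collect every object of a paginated listing, e.g. list_all("clusters")."""
       items, marker = [], None
       while True:
           params = {"limit": page_size}
           if marker:
               params["marker"] = marker        # last element of the previous page
           resp = requests.get(f"{SAHARA_URL}/{resource}",
                               headers=HEADERS, params=params)
           resp.raise_for_status()
           body = resp.json()
           # Listing bodies are keyed by the resource name with underscores,
           # e.g. "clusters" or "node_group_templates" (assumption).
           items.extend(body[resource.replace("-", "_")])
           # "markers" exists only when "limit" was passed; "next" is null on
           # the last page, which ends the loop.
           marker = (body.get("markers") or {}).get("next")
           if not marker:
               return items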
in: body required: true type: boolean object_is_public: description: | If set to ``true``, the object is public. in: body required: true type: boolean object_shares: description: | The sharing of resources in the cluster. in: body required: true type: string oozie_job_id: description: | The UUID of the ``oozie_job``. in: body required: true type: string output_id: description: | The UUID of the output of job execution object. in: body required: true type: string params: description: | The mappings of values to the parameters. in: body required: true type: object plugin_name: description: | The name of the plugin. in: body required: true type: string plugin_version: description: | The version of the Plugin used in the cluster. in: body required: true type: string plugin_version_1: description: | The version of the Plugin. in: body required: true type: string plugins: description: | The list of plugins. in: body required: true type: array prev: description: | The marker of previous page. May be ``null`` if previous page is first or if current page is first. in: body required: false type: string progress: description: | A progress indicator, as a percentage value, for the amount of image content that has been processed. in: body required: true type: integer project_id: description: | The UUID of the project. in: body required: true type: string provision_progress: description: | A list of the cluster progresses. in: body required: true type: array return_code: description: | The code returned after job has executed. in: body required: true type: string security_groups: description: | The security groups of the node. in: body required: true type: string shares: description: | The shares of the cluster. in: body required: true type: string start_time: description: | The date and time when the job started. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string status: description: | The status of the cluster. in: body required: true type: string status_1: description: | The current status of the image. in: body required: true type: string status_description: description: | The description of the cluster status. in: body required: true type: string tags: description: | List of tags to add. in: body required: true type: array tags_1: description: | Lists images only with specific tag. Can be used multiple times. in: body required: false type: string tags_2: description: | One or more image tags. in: body required: true type: array tags_3: description: | List of tags to remove. in: body required: true type: array tenant_id: description: | The UUID of the tenant. in: body required: true type: string title: description: | The title of the plugin. in: body required: true type: string trust_id: description: | The id of the trust. in: body required: true type: integer type: description: | The type of the data source object. in: body required: true type: string type_1: description: | The type of the job object. in: body required: true type: string updated: description: | The date and time when the image was updated. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm For example, ``2015-08-27T09:49:58-05:00``. The ``±hh:mm`` value, if included, is the time zone as an offset from UTC. in: body required: true type: string updated_at: description: | The date and time when the cluster was updated. 
The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string updated_at_1: description: | The date and time when the object was updated. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string updated_at_2: description: | The date and time when the node was updated. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string updated_at_3: description: | The date and time when the job execution object was updated. The date and time stamp format is `ISO 8601 `_: :: CCYY-MM-DDThh:mm:ss±hh:mm The ``±hh:mm`` value, if included, returns the time zone as an offset from UTC. For example, ``2015-08-27T09:49:58-05:00``. in: body required: true type: string url: description: | The url of the data source object. in: body required: true type: string url_1: description: | The url of the job binary object. in: body required: true type: string use_autoconfig: description: | If set to ``true``, the cluster is auto configured. in: body required: true type: boolean use_autoconfig_1: description: | If set to ``true``, the node is auto configured. in: body required: true type: boolean username: description: | The name of the user for the image. in: body required: true type: string username_1: description: | The user name to log in to an instance operating system for remote operations execution. in: body required: true type: string versions: description: | The list of plugin versions. in: body required: true type: array volume_local_to_instance: description: | If set to ``true``, the volume is local to the instance. in: body required: true type: boolean volume_mount_prefix: description: | The mount point of the node. in: body required: true type: string volume_type: description: | The type of volume in a node. in: body required: true type: string volumes_availability_zone: description: | The availability zone of the volumes. in: body required: true type: string volumes_per_node: description: | The number of volumes for the node. in: body required: true type: integer volumes_size: description: | The size of the volumes in a node. in: body required: true type: integer ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/plugins.inc0000664000175000017500000000504700000000000020414 0ustar00zuulzuul00000000000000.. -*- rst -*- ======= Plugins ======= A plugin object defines the Hadoop or Spark version that it can install and which configurations can be set for the cluster. Show plugin details =================== .. rest_method:: GET /v2/plugins/{plugin_name} Shows details for a plugin. Normal response codes: 200 Error response codes: 400, 500 Request ------- .. rest_parameters:: parameters.yaml - plugin_name: url_plugin_name Response Parameters ------------------- .. rest_parameters:: parameters.yaml - versions: versions - title: title - description: description_plugin - name: plugin_name Response Example ---------------- .. 
literalinclude:: samples/plugins/plugin-show-response.json :language: javascript List plugins ============ .. rest_method:: GET /v2/plugins Lists all registered plugins. Normal response codes: 200 Error response codes: 400, 500 Response Parameters ------------------- .. rest_parameters:: parameters.yaml - title: title - versions: versions - plugins: plugins - description: description_plugin - name: plugin_name Response Example ---------------- .. literalinclude:: samples/plugins/plugins-list-response.json :language: javascript Show plugin version details =========================== .. rest_method:: GET /v2/plugins/{plugin_name}/{version} Shows details for a plugin version. Normal response codes: 200 Error response codes: 400, 500 Request ------- .. rest_parameters:: parameters.yaml - plugin_name: url_plugin_name - version: version Response Parameters ------------------- .. rest_parameters:: parameters.yaml - versions: versions - title: title - description: description_plugin - name: plugin_name Response Example ---------------- .. literalinclude:: samples/plugins/plugin-version-show-response.json :language: javascript Update plugin details ===================== .. rest_method:: PATCH /v2/plugins/{plugin_name} Updates details for a plugin. Normal response codes: 202 Error response codes: 400, 500 Request ------- .. rest_parameters:: parameters.yaml - plugin_name: url_plugin_name Request Example --------------- .. literalinclude:: samples/plugins/plugin-update-request.json :language: javascript Response Parameters ------------------- .. rest_parameters:: parameters.yaml - title: title - versions: versions - description: description_plugin - name: plugin_name Response Example ---------------- .. literalinclude:: samples/plugins/plugin-update-response.json :language: javascript ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.589891 sahara-16.0.0/api-ref/source/v2/samples/0000775000175000017500000000000000000000000017676 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.629891 sahara-16.0.0/api-ref/source/v2/samples/cluster-templates/0000775000175000017500000000000000000000000023353 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/cluster-templates/cluster-template-create-request.json0000664000175000017500000000065300000000000032473 0ustar00zuulzuul00000000000000{ "plugin_name": "vanilla", "plugin_version": "2.7.1", "node_groups": [ { "name": "worker", "count": 3, "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251" }, { "name": "master", "count": 1, "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae" } ], "name": "cluster-template" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/cluster-templates/cluster-template-create-response.json0000664000175000017500000000574600000000000032651 0ustar00zuulzuul00000000000000{ "cluster_template": { "is_public": false, "anti_affinity": [], "name": "cluster-template", "created_at": "2015-09-14T10:38:44", "project_id": "808d5032ea0446889097723bfc8e919d", "cluster_configs": {}, "shares": null, "id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", "default_image_id": null, "is_default": false, "updated_at": null, "plugin_name": "vanilla", "node_groups": [ { "image_id": null, 
"shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "availability_zone": null, "count": 1, "flavor_id": "2", "id": "1751c04e-8f39-467e-a421-480961172d4b", "security_groups": null, "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:38:44", "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "updated_at": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "master", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null }, { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "availability_zone": null, "count": 3, "flavor_id": "2", "id": "3ee85068-c455-4391-9db2-b54a20b99df3", "security_groups": null, "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:38:44", "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", "updated_at": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "worker", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "datanode", "nodemanager" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null } ], "neutron_management_network": null, "domain_name": null, "plugin_version": "2.7.1", "use_autoconfig": true, "description": null, "is_protected": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/cluster-templates/cluster-template-show-response.json0000664000175000017500000000601000000000000032347 0ustar00zuulzuul00000000000000{ "cluster_template": { "is_public": false, "anti_affinity": [], "name": "cluster-template", "created_at": "2015-09-14T10:38:44", "project_id": "808d5032ea0446889097723bfc8e919d", "cluster_configs": {}, "shares": null, "id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", "default_image_id": null, "is_default": false, "updated_at": null, "plugin_name": "vanilla", "node_groups": [ { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "availability_zone": null, "count": 1, "flavor_id": "2", "id": "1751c04e-8f39-467e-a421-480961172d4b", "security_groups": null, "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:38:44", "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "updated_at": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "master", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null }, { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "availability_zone": null, "count": 3, "flavor_id": "2", "id": "3ee85068-c455-4391-9db2-b54a20b99df3", "security_groups": null, "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:38:44", "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", "updated_at": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "worker", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "datanode", "nodemanager" ], "volumes_size": 0, 
"volume_local_to_instance": false, "volume_type": null } ], "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", "domain_name": null, "plugin_version": "2.7.1", "use_autoconfig": true, "description": null, "is_protected": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/cluster-templates/cluster-template-update-request.json0000664000175000017500000000034300000000000032506 0ustar00zuulzuul00000000000000{ "description": "Updated template", "plugin_name": "vanilla", "plugin_version": "2.7.1", "name": "vanilla-updated", "cluster_configs": { "HDFS": { "dfs.replication": 2 } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/cluster-templates/cluster-template-update-response.json0000664000175000017500000000437100000000000032661 0ustar00zuulzuul00000000000000{ "cluster_template": { "is_public": false, "anti_affinity": [], "name": "vanilla-updated", "created_at": "2015-08-21T08:41:24", "project_id": "808d5032ea0446889097723bfc8e919d", "cluster_configs": { "HDFS": { "dfs.replication": 2 } }, "shares": null, "id": "84d47e85-6094-473f-bf6d-5a7e6e86564e", "default_image_id": null, "is_default": false, "updated_at": "2015-09-14T10:45:57", "plugin_name": "vanilla", "node_groups": [ { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": { "YARN": {}, "JobFlow": {}, "MapReduce": {}, "Hive": {}, "Hadoop": {}, "HDFS": {} }, "auto_security_group": true, "availability_zone": "", "count": 1, "flavor_id": "3", "id": "57b966ab-617e-4735-bf60-0cb991208a52", "security_groups": [], "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-08-21T08:41:24", "node_group_template_id": "a5533187-3f14-42c3-ba3a-196c13fe0fb5", "updated_at": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "all", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "datanode", "historyserver", "resourcemanager", "nodemanager", "oozie" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null } ], "neutron_management_network": null, "domain_name": null, "plugin_version": "2.7.1", "use_autoconfig": true, "description": "Updated template", "is_protected": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/cluster-templates/cluster-templates-list-response.json0000664000175000017500000001267500000000000032543 0ustar00zuulzuul00000000000000{ "cluster_templates": [ { "is_public": false, "anti_affinity": [], "name": "cluster-template", "created_at": "2015-09-14T10:38:44", "project_id": "808d5032ea0446889097723bfc8e919d", "cluster_configs": {}, "shares": null, "id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", "default_image_id": null, "is_default": false, "updated_at": null, "plugin_name": "vanilla", "node_groups": [ { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "availability_zone": null, "count": 1, "flavor_id": "2", "id": "1751c04e-8f39-467e-a421-480961172d4b", "security_groups": null, "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:38:44", "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "updated_at": null, "volumes_per_node": 0, 
"is_proxy_gateway": false, "name": "master", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null }, { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "availability_zone": null, "count": 3, "flavor_id": "2", "id": "3ee85068-c455-4391-9db2-b54a20b99df3", "security_groups": null, "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:38:44", "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", "updated_at": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "worker", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "datanode", "nodemanager" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null } ], "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", "domain_name": null, "plugin_version": "2.7.1", "use_autoconfig": true, "description": null, "is_protected": false }, { "is_public": true, "anti_affinity": [], "name": "asd", "created_at": "2015-08-18T08:39:39", "project_id": "808d5032ea0446889097723bfc8e919d", "cluster_configs": { "general": {} }, "shares": null, "id": "5a9c787c-2078-4f7d-9a66-27759be9051b", "default_image_id": null, "is_default": false, "updated_at": "2015-09-14T08:41:15", "plugin_name": "vanilla", "node_groups": [ { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": true, "availability_zone": "", "count": 1, "flavor_id": "2", "id": "a65864dd-3f99-4d29-a011-f7711cc23fa0", "security_groups": [], "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-08-18T08:39:39", "node_group_template_id": "42ce49de-1b8f-41d5-8f4a-244ec0826d92", "updated_at": null, "volumes_per_node": 1, "is_proxy_gateway": false, "name": "asd", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "jobtracker" ], "volumes_size": 10, "volume_local_to_instance": false, "volume_type": null } ], "neutron_management_network": null, "domain_name": null, "plugin_version": "2.7.1", "use_autoconfig": true, "description": "", "is_protected": false } ], "markers": { "prev": null, "next": "2c76e0d3-56cd-4d28-bb4f-4808e538c7b9" } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.633891 sahara-16.0.0/api-ref/source/v2/samples/clusters/0000775000175000017500000000000000000000000021542 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/clusters/cluster-create-request.json0000664000175000017500000000051300000000000027044 0ustar00zuulzuul00000000000000{ "plugin_name": "vanilla", "plugin_version": "2.7.1", "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", "user_keypair_id": "test", "name": "vanilla-cluster", "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/clusters/cluster-create-response.json0000664000175000017500000001307200000000000027216 0ustar00zuulzuul00000000000000{ "cluster": { "is_public": false, "project_id": 
"808d5032ea0446889097723bfc8e919d", "shares": null, "domain_name": null, "status_description": "", "plugin_name": "vanilla", "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", "info": {}, "user_keypair_id": "test", "management_public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfe9ARO+t9CybtuC1+cusDTeQL7wos1+U2dKPlCUJvNUn0PcunGefqWI4MUZPY9yGmvRqfINy7/xRQCzL0AwgqzwcCXamcK8JCC80uH7j8Vxa4kJheG1jxMoz/FpDSdRnzNZ+m7H5rjOwAQANhL7KatGLyCPQg9fqOoaIyCZE/A3fztm/XjJMpWnuANpUZubZtISEfu4UZKVk/DPSlBrbTZkTOvEog1LwZCZoTt0rq6a7PJFzJJkq0YecRudu/f3tpXbNe/F84sd9PhOSqcrRbm72WzglyEE8PuS1kuWpEz8G+Y5/0tQxnoh6khj9mgflrdCFuvpdutFLH4eN5MFDh Generated-by-Sahara\n", "id": "e172d86c-906d-418e-a29c-6189f53bfa42", "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", "node_groups": [ { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.maximum-allocation-mb": 2048, "yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048 }, "MapReduce": { "yarn.app.mapreduce.am.resource.mb": 256, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "mapreduce.reduce.memory.mb": 512, "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m" } }, "auto_security_group": false, "availability_zone": null, "count": 1, "flavor_id": "2", "id": "0fe07f2a-0275-4bc0-93b2-c3c1e48e2815", "security_groups": null, "use_autoconfig": true, "instances": [], "volumes_availability_zone": null, "created_at": "2015-09-14T10:57:11", "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "updated_at": "2015-09-14T10:57:12", "volumes_per_node": 0, "is_proxy_gateway": false, "name": "master", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null }, { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.maximum-allocation-mb": 2048, "yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048 }, "MapReduce": { "yarn.app.mapreduce.am.resource.mb": 256, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "mapreduce.reduce.memory.mb": 512, "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m" } }, "auto_security_group": false, "availability_zone": null, "count": 3, "flavor_id": "2", "id": "c7a3bea4-c898-446b-8c67-6d378d4c06c4", "security_groups": null, "use_autoconfig": true, "instances": [], "volumes_availability_zone": null, "created_at": "2015-09-14T10:57:11", "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", "updated_at": "2015-09-14T10:57:12", "volumes_per_node": 0, "is_proxy_gateway": false, "name": "worker", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "datanode", "nodemanager" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null } ], "provision_progress": [], "plugin_version": "2.7.1", "use_autoconfig": true, "trust_id": null, "description": null, "created_at": "2015-09-14T10:57:11", "is_protected": false, "updated_at": "2015-09-14T10:57:12", "is_transient": false, "cluster_configs": { "HDFS": { "dfs.replication": 3 } }, "anti_affinity": 
[], "name": "vanilla-cluster", "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", "status": "Validating" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/clusters/cluster-scale-request.json0000664000175000017500000000045000000000000026670 0ustar00zuulzuul00000000000000{ "add_node_groups": [ { "count": 1, "name": "b-worker", "node_group_template_id": "bc270ffe-a086-4eeb-9baa-2f5a73504622" } ], "resize_node_groups": [ { "count": 4, "name": "worker" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/clusters/cluster-scale-response.json0000664000175000017500000004112100000000000027036 0ustar00zuulzuul00000000000000{ "cluster": { "info": { "YARN": { "Web UI": "http://172.18.168.115:8088", "ResourceManager": "http://172.18.168.115:8032" }, "HDFS": { "Web UI": "http://172.18.168.115:50070", "NameNode": "hdfs://vanilla-cluster-master-0:9000" }, "MapReduce JobHistory Server": { "Web UI": "http://172.18.168.115:19888" }, "JobFlow": { "Oozie": "http://172.18.168.115:11000" } }, "plugin_name": "vanilla", "plugin_version": "2.7.1", "updated_at": "2015-09-14T11:01:15", "name": "vanilla-cluster", "id": "e172d86c-906d-418e-a29c-6189f53bfa42", "management_public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfe9ARO+t9CybtuC1+cusDTeQL7wos1+U2dKPlCUJvNUn0PcunGefqWI4MUZPY9yGmvRqfINy7/xRQCzL0AwgqzwcCXamcK8JCC80uH7j8Vxa4kJheG1jxMoz/FpDSdRnzNZ+m7H5rjOwAQANhL7KatGLyCPQg9fqOoaIyCZE/A3fztm/XjJMpWnuANpUZubZtISEfu4UZKVk/DPSlBrbTZkTOvEog1LwZCZoTt0rq6a7PJFzJJkq0YecRudu/f3tpXbNe/F84sd9PhOSqcrRbm72WzglyEE8PuS1kuWpEz8G+Y5/0tQxnoh6khj9mgflrdCFuvpdutFLH4eN5MFDh Generated-by-Sahara\n", "trust_id": null, "status_description": "", "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", "is_protected": false, "is_transient": false, "provision_progress": [ { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 1, "successful": true, "step_name": "Create Heat stack", "step_type": "Engine: create cluster", "updated_at": "2015-09-14T10:57:38", "project_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T10:57:18", "id": "0a6d95f9-30f4-4434-823a-a38a7999a5af" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 4, "successful": true, "step_name": "Configure instances", "step_type": "Engine: create cluster", "updated_at": "2015-09-14T10:58:22", "project_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T10:58:16", "id": "29f2b587-c34c-4871-9ed9-9235b411cd9a" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 1, "successful": true, "step_name": "Start the following process(es): Oozie", "step_type": "Plugin: start cluster", "updated_at": "2015-09-14T11:01:15", "project_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T11:00:27", "id": "36f1efde-90f9-41c1-b409-aa1cf9623e3e" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 4, "successful": true, "step_name": "Configure instances", "step_type": "Plugin: configure cluster", "updated_at": "2015-09-14T10:59:21", "project_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T10:58:22", "id": "602bcc27-3a2d-42c8-8aca-ebc475319c72" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 1, "successful": true, "step_name": "Configure topology data", "step_type": "Plugin: configure 
cluster", "updated_at": "2015-09-14T10:59:37", "project_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T10:59:21", "id": "7e291df1-2d32-410d-ae89-33ab6f83cf17" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 3, "successful": true, "step_name": "Start the following process(es): DataNodes, NodeManagers", "step_type": "Plugin: start cluster", "updated_at": "2015-09-14T11:00:11", "project_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T11:00:01", "id": "8ab7933c-ad61-4a4f-88db-23ce78ee10f6" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 1, "successful": true, "step_name": "Await DataNodes start up", "step_type": "Plugin: start cluster", "updated_at": "2015-09-14T11:00:21", "project_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T11:00:11", "id": "9c8dc016-8c5b-4e80-9857-80c41f6bd971" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 1, "successful": true, "step_name": "Start the following process(es): HistoryServer", "step_type": "Plugin: start cluster", "updated_at": "2015-09-14T11:00:27", "project_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T11:00:21", "id": "c6327532-222b-416c-858f-73dbb32b8e97" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 4, "successful": true, "step_name": "Wait for instance accessibility", "step_type": "Engine: create cluster", "updated_at": "2015-09-14T10:58:14", "project_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T10:57:41", "id": "d3eca726-8b44-473a-ac29-fba45a893725" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 0, "successful": true, "step_name": "Mount volumes to instances", "step_type": "Engine: create cluster", "updated_at": "2015-09-14T10:58:15", "project_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T10:58:14", "id": "d7a875ff-64bf-41aa-882d-b5061c8ee152" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 1, "successful": true, "step_name": "Start the following process(es): ResourceManager", "step_type": "Plugin: start cluster", "updated_at": "2015-09-14T11:00:00", "project_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T10:59:55", "id": "ded7d227-10b8-4cb0-ab6c-25da1462bb7a" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 1, "successful": true, "step_name": "Start the following process(es): NameNode", "step_type": "Plugin: start cluster", "updated_at": "2015-09-14T10:59:54", "project_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T10:59:38", "id": "e1701ff5-930a-4212-945a-43515dfe24d1" }, { "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42", "total": 4, "successful": true, "step_name": "Assign IPs", "step_type": "Engine: create cluster", "updated_at": "2015-09-14T10:57:41", "project_id": "808d5032ea0446889097723bfc8e919d", "created_at": "2015-09-14T10:57:38", "id": "eaf0ab1b-bf8f-48f0-8f2c-fa4f82f539b9" } ], "status": "Active", "description": null, "use_autoconfig": true, "shares": null, "domain_name": null, "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", "is_public": false, "project_id": "808d5032ea0446889097723bfc8e919d", "node_groups": [ { "volumes_per_node": 0, "volume_type": null, "updated_at": "2015-09-14T10:57:37", "name": "b-worker", "id": "b7a6dea4-c898-446b-8c67-4f378d4c06c4", "node_group_template_id": "bc270ffe-a086-4eeb-9baa-2f5a73504622", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", 
"yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048, "yarn.scheduler.maximum-allocation-mb": 2048 }, "MapReduce": { "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m", "mapreduce.reduce.memory.mb": 512, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "yarn.app.mapreduce.am.resource.mb": 256 } }, "auto_security_group": false, "volumes_availability_zone": null, "use_autoconfig": true, "security_groups": null, "shares": null, "node_processes": [ "datanode", "nodemanager" ], "availability_zone": null, "flavor_id": "2", "image_id": null, "volume_local_to_instance": false, "count": 1, "volumes_size": 0, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "volume_mount_prefix": "/volumes/disk", "instances": [], "is_proxy_gateway": false, "created_at": "2015-09-14T10:57:11" }, { "volumes_per_node": 0, "volume_type": null, "updated_at": "2015-09-14T10:57:36", "name": "master", "id": "0fe07f2a-0275-4bc0-93b2-c3c1e48e2815", "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048, "yarn.scheduler.maximum-allocation-mb": 2048 }, "MapReduce": { "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m", "mapreduce.reduce.memory.mb": 512, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "yarn.app.mapreduce.am.resource.mb": 256 } }, "auto_security_group": false, "volumes_availability_zone": null, "use_autoconfig": true, "security_groups": null, "shares": null, "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "availability_zone": null, "flavor_id": "2", "image_id": null, "volume_local_to_instance": false, "count": 1, "volumes_size": 0, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "volume_mount_prefix": "/volumes/disk", "instances": [ { "instance_id": "b9f16a07-88fc-423e-83a3-489598fe6737", "internal_ip": "10.50.0.60", "instance_name": "vanilla-cluster-master-0", "updated_at": "2015-09-14T10:57:39", "management_ip": "172.18.168.115", "created_at": "2015-09-14T10:57:36", "id": "4867d92e-cc7b-4cde-9a1a-149e91caa491" } ], "is_proxy_gateway": false, "created_at": "2015-09-14T10:57:11" }, { "volumes_per_node": 0, "volume_type": null, "updated_at": "2015-09-14T10:57:37", "name": "worker", "id": "c7a3bea4-c898-446b-8c67-6d378d4c06c4", "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048, "yarn.scheduler.maximum-allocation-mb": 2048 }, "MapReduce": { "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m", "mapreduce.reduce.memory.mb": 512, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "yarn.app.mapreduce.am.resource.mb": 256 } }, "auto_security_group": false, "volumes_availability_zone": null, "use_autoconfig": true, "security_groups": null, "shares": null, "node_processes": [ "datanode", "nodemanager" ], "availability_zone": null, "flavor_id": "2", "image_id": null, "volume_local_to_instance": false, "count": 4, "volumes_size": 0, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", 
"volume_mount_prefix": "/volumes/disk", "instances": [ { "instance_id": "0cf1ee81-aa72-48da-be2c-65bc2fa51f8f", "internal_ip": "10.50.0.63", "instance_name": "vanilla-cluster-worker-0", "updated_at": "2015-09-14T10:57:39", "management_ip": "172.18.168.118", "created_at": "2015-09-14T10:57:37", "id": "f3633b30-c1e4-4144-930b-ab5b780b87be" }, { "instance_id": "4a937391-b594-4ad0-9a53-00a99a691383", "internal_ip": "10.50.0.62", "instance_name": "vanilla-cluster-worker-1", "updated_at": "2015-09-14T10:57:40", "management_ip": "172.18.168.117", "created_at": "2015-09-14T10:57:37", "id": "0d66fd93-f277-4a94-b46a-f5866aa0c38f" }, { "instance_id": "839b1d56-6d0d-4aa4-9d05-30e029c276f8", "internal_ip": "10.50.0.61", "instance_name": "vanilla-cluster-worker-2", "updated_at": "2015-09-14T10:57:40", "management_ip": "172.18.168.116", "created_at": "2015-09-14T10:57:37", "id": "0982cefd-5c58-436e-8f1e-c1d0830f18a7" } ], "is_proxy_gateway": false, "created_at": "2015-09-14T10:57:11" } ], "cluster_configs": { "HDFS": { "dfs.replication": 3 } }, "user_keypair_id": "apavlov", "anti_affinity": [], "created_at": "2015-09-14T10:57:11" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/clusters/cluster-show-response.json0000664000175000017500000001307200000000000026733 0ustar00zuulzuul00000000000000{ "cluster": { "is_public": false, "project_id": "808d5032ea0446889097723bfc8e919d", "shares": null, "domain_name": null, "status_description": "", "plugin_name": "vanilla", "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", "info": {}, "user_keypair_id": "test", "management_public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfe9ARO+t9CybtuC1+cusDTeQL7wos1+U2dKPlCUJvNUn0PcunGefqWI4MUZPY9yGmvRqfINy7/xRQCzL0AwgqzwcCXamcK8JCC80uH7j8Vxa4kJheG1jxMoz/FpDSdRnzNZ+m7H5rjOwAQANhL7KatGLyCPQg9fqOoaIyCZE/A3fztm/XjJMpWnuANpUZubZtISEfu4UZKVk/DPSlBrbTZkTOvEog1LwZCZoTt0rq6a7PJFzJJkq0YecRudu/f3tpXbNe/F84sd9PhOSqcrRbm72WzglyEE8PuS1kuWpEz8G+Y5/0tQxnoh6khj9mgflrdCFuvpdutFLH4eN5MFDh Generated-by-Sahara\n", "id": "e172d86c-906d-418e-a29c-6189f53bfa42", "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", "node_groups": [ { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.maximum-allocation-mb": 2048, "yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048 }, "MapReduce": { "yarn.app.mapreduce.am.resource.mb": 256, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "mapreduce.reduce.memory.mb": 512, "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m" } }, "auto_security_group": false, "availability_zone": null, "count": 1, "flavor_id": "2", "id": "0fe07f2a-0275-4bc0-93b2-c3c1e48e2815", "security_groups": null, "use_autoconfig": true, "instances": [], "volumes_availability_zone": null, "created_at": "2015-09-14T10:57:11", "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "updated_at": "2015-09-14T10:57:12", "volumes_per_node": 0, "is_proxy_gateway": false, "name": "master", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null }, { "image_id": null, "shares": null, "floating_ip_pool": 
"033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.maximum-allocation-mb": 2048, "yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048 }, "MapReduce": { "yarn.app.mapreduce.am.resource.mb": 256, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "mapreduce.reduce.memory.mb": 512, "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m" } }, "auto_security_group": false, "availability_zone": null, "count": 3, "flavor_id": "2", "id": "c7a3bea4-c898-446b-8c67-6d378d4c06c4", "security_groups": null, "use_autoconfig": true, "instances": [], "volumes_availability_zone": null, "created_at": "2015-09-14T10:57:11", "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", "updated_at": "2015-09-14T10:57:12", "volumes_per_node": 0, "is_proxy_gateway": false, "name": "worker", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "datanode", "nodemanager" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null } ], "provision_progress": [], "plugin_version": "2.7.1", "use_autoconfig": true, "trust_id": null, "description": null, "created_at": "2015-09-14T10:57:11", "is_protected": false, "updated_at": "2015-09-14T10:57:12", "is_transient": false, "cluster_configs": { "HDFS": { "dfs.replication": 3 } }, "anti_affinity": [], "name": "vanilla-cluster", "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", "status": "Validating" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/clusters/cluster-update-request.json0000664000175000017500000000010000000000000027053 0ustar00zuulzuul00000000000000{ "name": "public-vanilla-cluster", "is_public": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/clusters/cluster-update-response.json0000664000175000017500000001310000000000000027225 0ustar00zuulzuul00000000000000{ "cluster": { "is_public": true, "project_id": "808d5032ea0446889097723bfc8e919d", "shares": null, "domain_name": null, "status_description": "", "plugin_name": "vanilla", "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", "info": {}, "user_keypair_id": "test", "management_public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCfe9ARO+t9CybtuC1+cusDTeQL7wos1+U2dKPlCUJvNUn0PcunGefqWI4MUZPY9yGmvRqfINy7/xRQCzL0AwgqzwcCXamcK8JCC80uH7j8Vxa4kJheG1jxMoz/FpDSdRnzNZ+m7H5rjOwAQANhL7KatGLyCPQg9fqOoaIyCZE/A3fztm/XjJMpWnuANpUZubZtISEfu4UZKVk/DPSlBrbTZkTOvEog1LwZCZoTt0rq6a7PJFzJJkq0YecRudu/f3tpXbNe/F84sd9PhOSqcrRbm72WzglyEE8PuS1kuWpEz8G+Y5/0tQxnoh6khj9mgflrdCFuvpdutFLH4eN5MFDh Generated-by-Sahara\n", "id": "e172d86c-906d-418e-a29c-6189f53bfa42", "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", "node_groups": [ { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.maximum-allocation-mb": 2048, "yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048 }, "MapReduce": { "yarn.app.mapreduce.am.resource.mb": 256, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "mapreduce.reduce.memory.mb": 512, "mapreduce.map.memory.mb": 256, 
"yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m" } }, "auto_security_group": false, "availability_zone": null, "count": 1, "flavor_id": "2", "id": "0fe07f2a-0275-4bc0-93b2-c3c1e48e2815", "security_groups": null, "use_autoconfig": true, "instances": [], "volumes_availability_zone": null, "created_at": "2015-09-14T10:57:11", "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "updated_at": "2015-09-14T10:57:12", "volumes_per_node": 0, "is_proxy_gateway": false, "name": "master", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null }, { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.maximum-allocation-mb": 2048, "yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048 }, "MapReduce": { "yarn.app.mapreduce.am.resource.mb": 256, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "mapreduce.reduce.memory.mb": 512, "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m" } }, "auto_security_group": false, "availability_zone": null, "count": 3, "flavor_id": "2", "id": "c7a3bea4-c898-446b-8c67-6d378d4c06c4", "security_groups": null, "use_autoconfig": true, "instances": [], "volumes_availability_zone": null, "created_at": "2015-09-14T10:57:11", "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", "updated_at": "2015-09-14T10:57:12", "volumes_per_node": 0, "is_proxy_gateway": false, "name": "worker", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "datanode", "nodemanager" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null } ], "provision_progress": [], "plugin_version": "2.7.1", "use_autoconfig": true, "trust_id": null, "description": null, "created_at": "2015-09-14T10:57:11", "is_protected": false, "updated_at": "2015-09-14T10:57:12", "is_transient": false, "cluster_configs": { "HDFS": { "dfs.replication": 3 } }, "anti_affinity": [], "name": "public-vanilla-cluster", "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", "status": "Validating" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/clusters/clusters-list-response.json0000664000175000017500000003756500000000000027126 0ustar00zuulzuul00000000000000{ "clusters": [ { "is_public": false, "project_id": "808d5032ea0446889097723bfc8e919d", "shares": null, "domain_name": null, "status_description": "", "plugin_name": "vanilla", "neutron_management_network": "b1610452-2933-46b0-bf31-660cfa5621bd", "info": { "YARN": { "Web UI": "http://172.18.168.115:8088", "ResourceManager": "http://172.18.168.115:8032" }, "HDFS": { "Web UI": "http://172.18.168.115:50070", "NameNode": "hdfs://vanilla-cluster-master-0:9000" }, "JobFlow": { "Oozie": "http://172.18.168.115:11000" }, "MapReduce JobHistory Server": { "Web UI": "http://172.18.168.115:19888" } }, "user_keypair_id": "apavlov", "management_public_key": "ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCfe9ARO+t9CybtuC1+cusDTeQL7wos1+U2dKPlCUJvNUn0PcunGefqWI4MUZPY9yGmvRqfINy7/xRQCzL0AwgqzwcCXamcK8JCC80uH7j8Vxa4kJheG1jxMoz/FpDSdRnzNZ+m7H5rjOwAQANhL7KatGLyCPQg9fqOoaIyCZE/A3fztm/XjJMpWnuANpUZubZtISEfu4UZKVk/DPSlBrbTZkTOvEog1LwZCZoTt0rq6a7PJFzJJkq0YecRudu/f3tpXbNe/F84sd9PhOSqcrRbm72WzglyEE8PuS1kuWpEz8G+Y5/0tQxnoh6khj9mgflrdCFuvpdutFLH4eN5MFDh Generated-by-Sahara\n", "id": "e172d86c-906d-418e-a29c-6189f53bfa42", "cluster_template_id": "57c92a7c-5c6a-42ea-9c6f-9f40a5aa4b36", "node_groups": [ { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.maximum-allocation-mb": 2048, "yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048 }, "MapReduce": { "yarn.app.mapreduce.am.resource.mb": 256, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "mapreduce.reduce.memory.mb": 512, "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m" } }, "auto_security_group": false, "availability_zone": null, "count": 1, "flavor_id": "2", "id": "0fe07f2a-0275-4bc0-93b2-c3c1e48e2815", "security_groups": null, "use_autoconfig": true, "instances": [ { "created_at": "2015-09-14T10:57:36", "id": "4867d92e-cc7b-4cde-9a1a-149e91caa491", "management_ip": "172.18.168.115", "updated_at": "2015-09-14T10:57:39", "instance_id": "b9f16a07-88fc-423e-83a3-489598fe6737", "internal_ip": "10.50.0.60", "instance_name": "vanilla-cluster-master-0" } ], "volumes_availability_zone": null, "created_at": "2015-09-14T10:57:11", "node_group_template_id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "updated_at": "2015-09-14T10:57:36", "volumes_per_node": 0, "is_proxy_gateway": false, "name": "master", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null }, { "image_id": null, "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": { "YARN": { "yarn.nodemanager.vmem-check-enabled": "false", "yarn.scheduler.maximum-allocation-mb": 2048, "yarn.scheduler.minimum-allocation-mb": 256, "yarn.nodemanager.resource.memory-mb": 2048 }, "MapReduce": { "yarn.app.mapreduce.am.resource.mb": 256, "mapreduce.task.io.sort.mb": 102, "mapreduce.reduce.java.opts": "-Xmx409m", "mapreduce.reduce.memory.mb": 512, "mapreduce.map.memory.mb": 256, "yarn.app.mapreduce.am.command-opts": "-Xmx204m", "mapreduce.map.java.opts": "-Xmx204m" } }, "auto_security_group": false, "availability_zone": null, "count": 3, "flavor_id": "2", "id": "c7a3bea4-c898-446b-8c67-6d378d4c06c4", "security_groups": null, "use_autoconfig": true, "instances": [ { "created_at": "2015-09-14T10:57:37", "id": "f3633b30-c1e4-4144-930b-ab5b780b87be", "management_ip": "172.18.168.118", "updated_at": "2015-09-14T10:57:39", "instance_id": "0cf1ee81-aa72-48da-be2c-65bc2fa51f8f", "internal_ip": "10.50.0.63", "instance_name": "vanilla-cluster-worker-0" }, { "created_at": "2015-09-14T10:57:37", "id": "0d66fd93-f277-4a94-b46a-f5866aa0c38f", "management_ip": "172.18.168.117", "updated_at": "2015-09-14T10:57:40", "instance_id": "4a937391-b594-4ad0-9a53-00a99a691383", "internal_ip": "10.50.0.62", "instance_name": "vanilla-cluster-worker-1" }, { "created_at": "2015-09-14T10:57:37", "id": "0982cefd-5c58-436e-8f1e-c1d0830f18a7", "management_ip": "172.18.168.116", 
"updated_at": "2015-09-14T10:57:40", "instance_id": "839b1d56-6d0d-4aa4-9d05-30e029c276f8", "internal_ip": "10.50.0.61", "instance_name": "vanilla-cluster-worker-2" } ], "volumes_availability_zone": null, "created_at": "2015-09-14T10:57:11", "node_group_template_id": "846edb31-add5-46e6-a4ee-a4c339f99251", "updated_at": "2015-09-14T10:57:37", "volumes_per_node": 0, "is_proxy_gateway": false, "name": "worker", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "datanode", "nodemanager" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null } ], "provision_progress": [ { "created_at": "2015-09-14T10:57:18", "project_id": "808d5032ea0446889097723bfc8e919d", "id": "0a6d95f9-30f4-4434-823a-a38a7999a5af", "step_type": "Engine: create cluster", "step_name": "Create Heat stack", "updated_at": "2015-09-14T10:57:38", "successful": true, "total": 1, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T10:58:16", "project_id": "808d5032ea0446889097723bfc8e919d", "id": "29f2b587-c34c-4871-9ed9-9235b411cd9a", "step_type": "Engine: create cluster", "step_name": "Configure instances", "updated_at": "2015-09-14T10:58:22", "successful": true, "total": 4, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T11:00:27", "project_id": "808d5032ea0446889097723bfc8e919d", "id": "36f1efde-90f9-41c1-b409-aa1cf9623e3e", "step_type": "Plugin: start cluster", "step_name": "Start the following process(es): Oozie", "updated_at": "2015-09-14T11:01:15", "successful": true, "total": 1, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T10:58:22", "project_id": "808d5032ea0446889097723bfc8e919d", "id": "602bcc27-3a2d-42c8-8aca-ebc475319c72", "step_type": "Plugin: configure cluster", "step_name": "Configure instances", "updated_at": "2015-09-14T10:59:21", "successful": true, "total": 4, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T10:59:21", "project_id": "808d5032ea0446889097723bfc8e919d", "id": "7e291df1-2d32-410d-ae89-33ab6f83cf17", "step_type": "Plugin: configure cluster", "step_name": "Configure topology data", "updated_at": "2015-09-14T10:59:37", "successful": true, "total": 1, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T11:00:01", "project_id": "808d5032ea0446889097723bfc8e919d", "id": "8ab7933c-ad61-4a4f-88db-23ce78ee10f6", "step_type": "Plugin: start cluster", "step_name": "Start the following process(es): DataNodes, NodeManagers", "updated_at": "2015-09-14T11:00:11", "successful": true, "total": 3, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T11:00:11", "project_id": "808d5032ea0446889097723bfc8e919d", "id": "9c8dc016-8c5b-4e80-9857-80c41f6bd971", "step_type": "Plugin: start cluster", "step_name": "Await DataNodes start up", "updated_at": "2015-09-14T11:00:21", "successful": true, "total": 1, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T11:00:21", "project_id": "808d5032ea0446889097723bfc8e919d", "id": "c6327532-222b-416c-858f-73dbb32b8e97", "step_type": "Plugin: start cluster", "step_name": "Start the following process(es): HistoryServer", "updated_at": "2015-09-14T11:00:27", "successful": true, "total": 1, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T10:57:41", "project_id": "808d5032ea0446889097723bfc8e919d", "id": "d3eca726-8b44-473a-ac29-fba45a893725", "step_type": "Engine: create cluster", 
"step_name": "Wait for instance accessibility", "updated_at": "2015-09-14T10:58:14", "successful": true, "total": 4, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T10:58:14", "project_id": "808d5032ea0446889097723bfc8e919d", "id": "d7a875ff-64bf-41aa-882d-b5061c8ee152", "step_type": "Engine: create cluster", "step_name": "Mount volumes to instances", "updated_at": "2015-09-14T10:58:15", "successful": true, "total": 0, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T10:59:55", "project_id": "808d5032ea0446889097723bfc8e919d", "id": "ded7d227-10b8-4cb0-ab6c-25da1462bb7a", "step_type": "Plugin: start cluster", "step_name": "Start the following process(es): ResourceManager", "updated_at": "2015-09-14T11:00:00", "successful": true, "total": 1, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T10:59:38", "project_id": "808d5032ea0446889097723bfc8e919d", "id": "e1701ff5-930a-4212-945a-43515dfe24d1", "step_type": "Plugin: start cluster", "step_name": "Start the following process(es): NameNode", "updated_at": "2015-09-14T10:59:54", "successful": true, "total": 1, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" }, { "created_at": "2015-09-14T10:57:38", "project_id": "808d5032ea0446889097723bfc8e919d", "id": "eaf0ab1b-bf8f-48f0-8f2c-fa4f82f539b9", "step_type": "Engine: create cluster", "step_name": "Assign IPs", "updated_at": "2015-09-14T10:57:41", "successful": true, "total": 4, "cluster_id": "e172d86c-906d-418e-a29c-6189f53bfa42" } ], "plugin_version": "2.7.1", "use_autoconfig": true, "trust_id": null, "description": null, "created_at": "2015-09-14T10:57:11", "is_protected": false, "updated_at": "2015-09-14T11:01:15", "is_transient": false, "cluster_configs": { "HDFS": { "dfs.replication": 3 } }, "anti_affinity": [], "name": "vanilla-cluster", "default_image_id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", "status": "Active" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/clusters/multiple-clusters-create-request.json0000664000175000017500000000056200000000000031064 0ustar00zuulzuul00000000000000{ "plugin_name": "vanilla", "plugin_version": "2.6.0", "cluster_template_id": "9951f86d-57ba-43d6-9cb0-14ed2ec7a6cf", "default_image_id": "bc3c3d3c-2684-4bf8-a9fa-388fb71288a9", "user_keypair_id": "test", "name": "def-cluster", "count": 2, "cluster_configs": {}, "neutron_management_network": "7e31648b-4b2e-4f32-9b0a-113581c27076" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/clusters/multiple-clusters-create-response.json0000664000175000017500000000017300000000000031230 0ustar00zuulzuul00000000000000{ "clusters": [ "a007a3e7-658f-4568-b0f2-fe2fd5efc554", "b012a6et-65hf-4566-b0f2-fe3fd7efc567" ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.633891 sahara-16.0.0/api-ref/source/v2/samples/data-sources/0000775000175000017500000000000000000000000022270 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/data-sources/data-source-register-hdfs-request.json0000664000175000017500000000022700000000000031625 0ustar00zuulzuul00000000000000{ "description": "This is hdfs input", "url": 
"hdfs://test-master-node:8020/user/hadoop/input", "type": "hdfs", "name": "hdfs_input" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/data-sources/data-source-register-hdfs-response.json0000664000175000017500000000067500000000000032002 0ustar00zuulzuul00000000000000{ "data_source": { "is_public": false, "project_id": "9cd1314a0a31493282b6712b76a8fcda", "is_protected": false, "created_at": "2015-03-26 11:09:36.148464", "id": "d7fffe9c-3b42-46a9-8be8-e98f586fa7a9", "updated_at": null, "name": "hdfs_input", "description": "This is hdfs input", "url": "hdfs://test-master-node:8020/user/hadoop/input", "type": "hdfs" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/data-sources/data-source-register-swift-request.json0000664000175000017500000000031700000000000032035 0ustar00zuulzuul00000000000000{ "description": "This is input", "url": "swift://container/text", "credentials": { "password": "swordfish", "user": "dev" }, "type": "swift", "name": "swift_input" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/data-sources/data-source-register-swift-response.json0000664000175000017500000000064200000000000032204 0ustar00zuulzuul00000000000000{ "data_source": { "is_public": false, "project_id": "9cd1314a0a31493282b6712b76a8fcda", "is_protected": false, "created_at": "2015-03-26 11:18:10.691493", "id": "953831f2-0852-49d8-ac71-af5805e25256", "updated_at": null, "name": "swift_input", "description": "This is input", "url": "swift://container/text", "type": "swift" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/data-sources/data-source-show-response.json0000664000175000017500000000064200000000000030206 0ustar00zuulzuul00000000000000{ "data_source": { "is_public": false, "project_id": "9cd1314a0a31493282b6712b76a8fcda", "is_protected": false, "created_at": "2015-03-26 11:18:10.691493", "id": "953831f2-0852-49d8-ac71-af5805e25256", "updated_at": null, "name": "swift_input", "description": "This is input", "url": "swift://container/text", "type": "swift" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/data-sources/data-source-update-request.json0000664000175000017500000000011000000000000030330 0ustar00zuulzuul00000000000000{ "description": "This is public input", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/data-sources/data-source-update-response.json0000664000175000017500000000070000000000000030503 0ustar00zuulzuul00000000000000{ "data_source": { "is_public": true, "project_id": "9cd1314a0a31493282b6712b76a8fcda", "is_protected": false, "created_at": "2015-09-15 12:32:24.847493", "id": "953831f2-0852-49d8-ac71-af5805e25256", "updated_at": "2015-09-15 12:34:42.597435", "name": "swift_input", "description": "This is public input", "url": "swift://container/text", "type": "swift" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 
sahara-16.0.0/api-ref/source/v2/samples/data-sources/data-sources-list-response.json0000664000175000017500000000165400000000000030370 0ustar00zuulzuul00000000000000{ "data_sources": [ { "is_public": false, "project_id": "9cd1314a0a31493282b6712b76a8fcda", "is_protected": false, "created_at": "2015-03-26 11:18:10", "id": "953831f2-0852-49d8-ac71-af5805e25256", "name": "swift_input", "updated_at": null, "description": "This is input", "url": "swift://container/text", "type": "swift" }, { "is_public": false, "project_id": "9cd1314a0a31493282b6712b76a8fcda", "is_protected": false, "created_at": "2015-03-26 11:09:36", "id": "d7fffe9c-3b42-46a9-8be8-e98f586fa7a9", "name": "hdfs_input", "updated_at": null, "description": "This is hdfs input", "url": "hdfs://test-master-node:8020/user/hadoop/input", "type": "hdfs" } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.633891 sahara-16.0.0/api-ref/source/v2/samples/event-log/0000775000175000017500000000000000000000000021576 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/event-log/cluster-progress-response.json0000664000175000017500000000613200000000000027652 0ustar00zuulzuul00000000000000{ "status": "Error", "neutron_management_network": "7e31648b-4b2e-4f32-9b0a-113581c27076", "is_transient": false, "description": "", "user_keypair_id": "vgridnev", "updated_at": "2015-03-31 14:10:59", "plugin_name": "spark", "provision_progress": [ { "successful": false, "project_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-03-31 14:10:20", "step_type": "Engine: create cluster", "updated_at": "2015-03-31 14:10:35", "events": [ { "instance_name": "sample-worker-spark-004", "successful": false, "created_at": "2015-03-31 14:10:35", "updated_at": null, "event_info": "Node sample-worker-spark-004 has error status\nError ID: 3e238c82-d1f5-4560-8ed8-691e923e16a0", "instance_id": "b5ba5ba8-e9c1-47f7-9355-3ce0ec0e449d", "node_group_id": "145cf2fb-dcdf-42af-a4b9-a4047d2919d4", "step_id": "3f243c67-2c27-47c7-a0c0-0834ad17f8b6", "id": "34afcfc7-bdb0-43cb-b142-283d560dc6ad" }, { "instance_name": "sample-worker-spark-001", "successful": true, "created_at": "2015-03-31 14:10:35", "updated_at": null, "event_info": null, "instance_id": "c532ab71-38da-475a-95f8-f8eb93b8f1c2", "node_group_id": "145cf2fb-dcdf-42af-a4b9-a4047d2919d4", "step_id": "3f243c67-2c27-47c7-a0c0-0834ad17f8b6", "id": "4ba50414-5216-4161-bc7a-12716122b99d" } ], "cluster_id": "c26ec982-ba6b-4d75-818c-a50240164af0", "step_name": "Wait for instances to become active", "total": 5, "id": "3f243c67-2c27-47c7-a0c0-0834ad17f8b6" }, { "successful": true, "project_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-03-31 14:10:12", "step_type": "Engine: create cluster", "updated_at": "2015-03-31 14:10:19", "events": [], "cluster_id": "c26ec982-ba6b-4d75-818c-a50240164af0", "step_name": "Run instances", "total": 5, "id": "407ba50a-c799-46af-9dfb-6aa5f6ade426" } ], "anti_affinity": [], "node_groups": [], "management_public_key": "Sahara", "status_description": "Creating cluster failed for the following reason(s): Node sample-worker-spark-004 has error status\nError ID: 3e238c82-d1f5-4560-8ed8-691e923e16a0", "plugin_version": "1.0.0", "id": "c26ec982-ba6b-4d75-1f8c-a50240164af0", "trust_id": null, "info": {}, "cluster_template_id": "5a9a09a3-9349-43bd-9058-16c401fad2d5", "name": "sample", "cluster_configs": {}, 
"created_at": "2015-03-31 14:10:07", "default_image_id": "e6a6c5da-67be-4017-a7d2-81f466efe67e", "project_id": "9cd1314a0a31493282b6712b76a8fcda" } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.633891 sahara-16.0.0/api-ref/source/v2/samples/image-registry/0000775000175000017500000000000000000000000022626 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/image-registry/image-register-request.json0000664000175000017500000000012100000000000030105 0ustar00zuulzuul00000000000000{ "username": "ubuntu", "description": "Ubuntu image for Hadoop 2.7.1" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/image-registry/image-register-response.json0000664000175000017500000000134100000000000030260 0ustar00zuulzuul00000000000000{ "image": { "updated": "2015-03-24T10:05:10Z", "metadata": { "_sahara_description": "Ubuntu image for Hadoop 2.7.1", "_sahara_username": "ubuntu", "_sahara_tag_vanilla": true, "_sahara_tag_2.7.1": true }, "id": "bb8d12b5-f9bb-49f0-aecb-739b8a9bec89", "minDisk": 0, "status": "ACTIVE", "tags": [ "vanilla", "2.7.1" ], "minRam": 0, "progress": 100, "username": "ubuntu", "created": "2015-02-03T10:28:39Z", "name": "sahara-vanilla-2.7.1-ubuntu-14.04", "description": "Ubuntu image for Hadoop 2.7.1", "OS-EXT-IMG-SIZE:size": 1101856768 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/image-registry/image-show-response.json0000664000175000017500000000120200000000000027410 0ustar00zuulzuul00000000000000{ "image": { "updated": "2015-02-03T10:29:32Z", "metadata": { "_sahara_username": "ubuntu", "_sahara_tag_vanilla": true, "_sahara_tag_2.6.0": true }, "id": "bb8d12b5-f9bb-49f0-aecb-739b8a9bec89", "minDisk": 0, "status": "ACTIVE", "tags": [ "vanilla", "2.6.0" ], "minRam": 0, "progress": 100, "username": "ubuntu", "created": "2015-02-03T10:28:39Z", "name": "sahara-vanilla-2.6.0-ubuntu-14.04", "description": null, "OS-EXT-IMG-SIZE:size": 1101856768 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/image-registry/image-tags-add-request.json0000664000175000017500000000012500000000000027751 0ustar00zuulzuul00000000000000{ "tags": [ "vanilla", "2.7.1", "some_other_tag" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/image-registry/image-tags-add-response.json0000664000175000017500000000145700000000000030130 0ustar00zuulzuul00000000000000{ "image": { "updated": "2015-03-24T10:18:33Z", "metadata": { "_sahara_tag_vanilla": true, "_sahara_description": "Ubuntu image for Hadoop 2.7.1", "_sahara_username": "ubuntu", "_sahara_tag_some_other_tag": true, "_sahara_tag_2.7.1": true }, "id": "bb8d12b5-f9bb-49f0-aecb-739b8a9bec89", "minDisk": 0, "status": "ACTIVE", "tags": [ "vanilla", "some_other_tag", "2.7.1" ], "minRam": 0, "progress": 100, "username": "ubuntu", "created": "2015-02-03T10:28:39Z", "name": "sahara-vanilla-2.6.0-ubuntu-14.04", "description": "Ubuntu image for Hadoop 2.7.1", "OS-EXT-IMG-SIZE:size": 1101856768 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 
mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/image-registry/image-tags-delete-request.json0000664000175000017500000000006100000000000030462 0ustar00zuulzuul00000000000000{ "tags": [ "some_other_tag" ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/image-registry/image-tags-delete-response.json0000664000175000017500000000134100000000000030632 0ustar00zuulzuul00000000000000{ "image": { "updated": "2015-03-24T10:19:28Z", "metadata": { "_sahara_description": "Ubuntu image for Hadoop 2.7.1", "_sahara_username": "ubuntu", "_sahara_tag_vanilla": true, "_sahara_tag_2.7.1": true }, "id": "bb8d12b5-f9bb-49f0-aecb-739b8a9bec89", "minDisk": 0, "status": "ACTIVE", "tags": [ "vanilla", "2.7.1" ], "minRam": 0, "progress": 100, "username": "ubuntu", "created": "2015-02-03T10:28:39Z", "name": "sahara-vanilla-2.7.1-ubuntu-14.04", "description": "Ubuntu image for Hadoop 2.7.1", "OS-EXT-IMG-SIZE:size": 1101856768 } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/image-registry/images-list-response.json0000664000175000017500000000261000000000000027572 0ustar00zuulzuul00000000000000{ "images": [ { "name": "ubuntu-vanilla-2.7.1", "id": "4118a476-dfdc-4b0e-8d5c-463cba08e9ae", "created": "2015-08-06T08:17:14Z", "metadata": { "_sahara_tag_2.7.1": true, "_sahara_username": "ubuntu", "_sahara_tag_vanilla": true }, "username": "ubuntu", "progress": 100, "OS-EXT-IMG-SIZE:size": 998716928, "status": "ACTIVE", "minDisk": 0, "tags": [ "vanilla", "2.7.1" ], "updated": "2015-09-04T09:35:09Z", "minRam": 0, "description": null }, { "name": "cdh-latest", "id": "ff74035b-9da7-4edf-981d-57f270ed337d", "created": "2015-09-04T11:56:44Z", "metadata": { "_sahara_username": "ubuntu", "_sahara_tag_5.4.0": true, "_sahara_tag_cdh": true }, "username": "ubuntu", "progress": 100, "OS-EXT-IMG-SIZE:size": 3281453056, "status": "ACTIVE", "minDisk": 0, "tags": [ "5.4.0", "cdh" ], "updated": "2015-09-04T12:46:42Z", "minRam": 0, "description": null } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.637891 sahara-16.0.0/api-ref/source/v2/samples/job-binaries/0000775000175000017500000000000000000000000022242 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/job-binaries/create-request.json0000664000175000017500000000031400000000000026064 0ustar00zuulzuul00000000000000{ "url": "swift://container/jar-example.jar", "name": "jar-example.jar", "description": "This is a job binary", "extra": { "password": "swordfish", "user": "admin" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/job-binaries/create-response.json0000664000175000017500000000063600000000000026241 0ustar00zuulzuul00000000000000{ "job_binary": { "is_public": false, "description": "This is a job binary", "url": "swift://container/jar-example.jar", "project_id": "11587919cc534bcbb1027a161c82cf58", "created_at": "2013-10-15 14:49:20.106452", "id": "07f86352-ee8a-4b08-b737-d705ded5ff9c", "updated_at": null, "name": "jar-example.jar", "is_protected": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 
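
A similar hedged sketch for the job-binary samples above: it posts the swift-backed job-binary body from create-request.json. As before, the base URL, token, and the /v2/job-binaries path are placeholders rather than values taken from the archive.

# Illustrative only: creates the swift-backed job binary shown in the
# sample create-request.json above. Endpoint, token and path are assumptions.
import requests

SAHARA_URL = "http://controller:8386"   # hypothetical
TOKEN = "gAAAA..."                      # hypothetical

job_binary = {
    "url": "swift://container/jar-example.jar",
    "name": "jar-example.jar",
    "description": "This is a job binary",
    "extra": {"user": "admin", "password": "swordfish"},
}

resp = requests.post(
    f"{SAHARA_URL}/v2/job-binaries",
    headers={"X-Auth-Token": TOKEN},
    json=job_binary,
)
resp.raise_for_status()
# The response body would look like create-response.json above.
print(resp.json()["job_binary"]["id"])
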
sahara-16.0.0/api-ref/source/v2/samples/job-binaries/list-response.json0000664000175000017500000000244000000000000025744 0ustar00zuulzuul00000000000000{ "binaries": [ { "is_public": false, "description": "", "url": "internal-db://d2498cbf-4589-484a-a814-81436c18beb3", "project_id": "11587919cc534bcbb1027a161c82cf58", "created_at": "2013-10-15 12:36:59.375060", "updated_at": null, "id": "84248975-3c82-4206-a58d-6e7fb3a563fd", "name": "example.pig", "is_protected": false }, { "is_public": false, "description": "", "url": "internal-db://22f1d87a-23c8-483e-a0dd-cb4a16dde5f9", "project_id": "11587919cc534bcbb1027a161c82cf58", "created_at": "2013-10-15 12:43:52.265899", "updated_at": null, "id": "508fc62d-1d58-4412-b603-bdab307bb926", "name": "udf.jar", "is_protected": false }, { "is_public": false, "description": "", "url": "swift://container/jar-example.jar", "project_id": "11587919cc534bcbb1027a161c82cf58", "created_at": "2013-10-15 14:25:04.970513", "updated_at": null, "id": "a716a9cd-9add-4b12-b1b6-cdb71aaef350", "name": "jar-example.jar", "is_protected": false } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/job-binaries/show-data-response0000664000175000017500000000024000000000000025704 0ustar00zuulzuul00000000000000A = load '$INPUT' using PigStorage(':') as (fruit: chararray); B = foreach A generate com.hadoopbook.pig.Trim(fruit); store B into '$OUTPUT' USING PigStorage();././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/job-binaries/show-response.json0000664000175000017500000000063500000000000025755 0ustar00zuulzuul00000000000000{ "job_binary": { "is_public": false, "description": "an example jar file", "url": "swift://container/jar-example.jar", "project_id": "11587919cc534bcbb1027a161c82cf58", "created_at": "2013-10-15 14:25:04.970513", "updated_at": null, "id": "a716a9cd-9add-4b12-b1b6-cdb71aaef350", "name": "jar-example.jar", "is_protected": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/job-binaries/update-request.json0000664000175000017500000000021100000000000026077 0ustar00zuulzuul00000000000000{ "url": "swift://container/new-jar-example.jar", "name": "new-jar-example.jar", "description": "This is a new job binary" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/job-binaries/update-response.json0000664000175000017500000000065200000000000026256 0ustar00zuulzuul00000000000000{ "job_binary": { "is_public": false, "description": "This is a new job binary", "url": "swift://container/new-jar-example.jar", "project_id": "11587919cc534bcbb1027a161c82cf58", "created_at": "2015-09-15 12:42:51.421542", "updated_at": null, "id": "b713d7ad-4add-4f12-g1b6-cdg71aaef350", "name": "new-jar-example.jar", "is_protected": false } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.637891 sahara-16.0.0/api-ref/source/v2/samples/job-templates/0000775000175000017500000000000000000000000022444 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 
sahara-16.0.0/api-ref/source/v2/samples/job-templates/job-template-create-request.json0000664000175000017500000000035400000000000030653 0ustar00zuulzuul00000000000000{ "description": "This is pig job example", "mains": [ "90d9d5ec-11aa-48bd-bc8c-34936ce0db6e" ], "libs": [ "320a2ca7-25fd-4b48-9bc3-4fb1b6c4ff27" ], "type": "Pig", "name": "pig-job-example" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/job-templates/job-template-create-response.json0000664000175000017500000000231300000000000031016 0ustar00zuulzuul00000000000000{ "job_template": { "is_public": false, "project_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-03-27 08:48:38.630827", "id": "71defc8f-d005-484f-9d86-1aedf644d1ef", "name": "pig-job-example", "description": "This is pig job example", "interface": [], "libs": [ { "project_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-10 14:25:53", "id": "320a2ca7-25fd-4b48-9bc3-4fb1b6c4ff27", "name": "binary-job", "updated_at": null, "description": "", "url": "internal-db://c6a925fa-ac1d-4b2e-b88a-7054e1927521" } ], "type": "Pig", "is_protected": false, "mains": [ { "project_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-03 10:47:51", "id": "90d9d5ec-11aa-48bd-bc8c-34936ce0db6e", "name": "pig", "updated_at": null, "description": "", "url": "internal-db://872878f6-72ea-44db-8d1d-e6a6396d2df0" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/job-templates/job-template-show-response.json0000664000175000017500000000150100000000000030531 0ustar00zuulzuul00000000000000{ "job_template": { "is_public": false, "project_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-10 14:25:48", "id": "1a674c31-9aaa-4d07-b844-2bf200a1b836", "name": "Edp-test-job", "updated_at": null, "description": "", "interface": [], "libs": [ { "project_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-10 14:25:48", "id": "0ff4ac10-94a4-4e25-9ac9-603afe27b100", "name": "binary-job.jar", "updated_at": null, "description": "", "url": "swift://Edp-test-c71e6bce.sahara/binary-job.jar" } ], "type": "MapReduce", "mains": [], "is_protected": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/job-templates/job-template-update-request.json0000664000175000017500000000013600000000000030670 0ustar00zuulzuul00000000000000{ "description": "This is public pig job example", "name": "public-pig-job-example" } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/job-templates/job-template-update-response.json0000664000175000017500000000155100000000000031040 0ustar00zuulzuul00000000000000{ "job_template": { "is_public": false, "project_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-10 14:25:48", "id": "1a674c31-9aaa-4d07-b844-2bf200a1b836", "name": "public-pig-job-example", "updated_at": null, "description": "This is public pig job example", "interface": [], "libs": [ { "project_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-10 14:25:48", "id": "0ff4ac10-94a4-4e25-9ac9-603afe27b100", "name": "binary-job.jar", "updated_at": null, "description": "", "url": "swift://Edp-test-c71e6bce.sahara/binary-job.jar" } ], "type": "MapReduce", 
"mains": [], "is_protected": false } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/job-templates/job-templates-list-response.json0000664000175000017500000000463700000000000030724 0ustar00zuulzuul00000000000000{ "job_templates": [ { "is_public": false, "project_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-10 14:25:48", "id": "1a674c31-9aaa-4d07-b844-2bf200a1b836", "name": "Edp-test-job-3d60854e", "updated_at": null, "description": "", "interface": [], "libs": [ { "project_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-10 14:25:48", "id": "0ff4ac10-94a4-4e25-9ac9-603afe27b100", "name": "binary-job-339c2d1a.jar", "updated_at": null, "description": "", "url": "swift://Edp-test-c71e6bce.sahara/binary-job-339c2d1a.jar" } ], "type": "MapReduce", "mains": [], "is_protected": false }, { "is_public": false, "project_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-10 14:25:44", "id": "4d1f3759-3497-4927-8352-910bacf24e62", "name": "Edp-test-job-6b6953c8", "updated_at": null, "description": "", "interface": [], "libs": [ { "project_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-10 14:25:44", "id": "e0d47800-4ac1-4d63-a2e1-c92d669a44e2", "name": "binary-job-6f21a2f8.jar", "updated_at": null, "description": "", "url": "swift://Edp-test-b409ec68.sahara/binary-job-6f21a2f8.jar" } ], "type": "Pig", "mains": [ { "project_id": "9cd1314a0a31493282b6712b76a8fcda", "created_at": "2015-02-10 14:25:44", "id": "e073e896-f123-4b76-995f-901d786262df", "name": "binary-job-d4f8bd75.pig", "updated_at": null, "description": "", "url": "swift://Edp-test-b409ec68.sahara/binary-job-d4f8bd75.pig" } ], "is_protected": false } ], "markers": { "prev": null, "next": "c53832da-6e7b-449e-a166-9f9ce1718d03" } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.637891 sahara-16.0.0/api-ref/source/v2/samples/job-types/0000775000175000017500000000000000000000000021612 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/job-types/job-types-list-response.json0000664000175000017500000002117400000000000027233 0ustar00zuulzuul00000000000000{ "job_types": [ { "plugins": [ { "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", "versions": { "1.2.1": {}, "2.6.0": {} }, "title": "Vanilla Apache Hadoop", "name": "vanilla" }, { "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", "versions": { "1.3.2": {}, "2.0.6": {} }, "title": "Hortonworks Data Platform", "name": "hdp" }, { "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", "versions": { "5": {}, "5.3.0": {} }, "title": "Cloudera Plugin", "name": "cdh" } ], "name": "Hive" }, { "plugins": [ { "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. 
It can also deploy the Oozie component.", "versions": { "1.2.1": {}, "2.6.0": {} }, "title": "Vanilla Apache Hadoop", "name": "vanilla" }, { "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", "versions": { "1.3.2": {}, "2.0.6": {} }, "title": "Hortonworks Data Platform", "name": "hdp" }, { "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", "versions": { "5": {}, "5.3.0": {} }, "title": "Cloudera Plugin", "name": "cdh" } ], "name": "Java" }, { "plugins": [ { "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", "versions": { "1.2.1": {}, "2.6.0": {} }, "title": "Vanilla Apache Hadoop", "name": "vanilla" }, { "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", "versions": { "1.3.2": {}, "2.0.6": {} }, "title": "Hortonworks Data Platform", "name": "hdp" }, { "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", "versions": { "5": {}, "5.3.0": {} }, "title": "Cloudera Plugin", "name": "cdh" } ], "name": "MapReduce" }, { "plugins": [ { "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", "versions": { "1.2.1": {}, "2.6.0": {} }, "title": "Vanilla Apache Hadoop", "name": "vanilla" }, { "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", "versions": { "1.3.2": {}, "2.0.6": {} }, "title": "Hortonworks Data Platform", "name": "hdp" }, { "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", "versions": { "5": {}, "5.3.0": {} }, "title": "Cloudera Plugin", "name": "cdh" } ], "name": "MapReduce.Streaming" }, { "plugins": [ { "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", "versions": { "1.2.1": {}, "2.6.0": {} }, "title": "Vanilla Apache Hadoop", "name": "vanilla" }, { "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", "versions": { "1.3.2": {}, "2.0.6": {} }, "title": "Hortonworks Data Platform", "name": "hdp" }, { "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", "versions": { "5": {}, "5.3.0": {} }, "title": "Cloudera Plugin", "name": "cdh" } ], "name": "Pig" }, { "plugins": [ { "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. 
It can also deploy the Oozie component.", "versions": { "1.2.1": {}, "2.6.0": {} }, "title": "Vanilla Apache Hadoop", "name": "vanilla" }, { "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", "versions": { "1.3.2": {}, "2.0.6": {} }, "title": "Hortonworks Data Platform", "name": "hdp" }, { "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", "versions": { "5": {}, "5.3.0": {} }, "title": "Cloudera Plugin", "name": "cdh" } ], "name": "Shell" }, { "plugins": [ { "description": "This plugin provides an ability to launch Spark on Hadoop CDH cluster without any management consoles.", "versions": { "1.0.0": {} }, "title": "Apache Spark", "name": "spark" } ], "name": "Spark" } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.637891 sahara-16.0.0/api-ref/source/v2/samples/jobs/0000775000175000017500000000000000000000000020633 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/jobs/cancel-response.json0000664000175000017500000001345600000000000024620 0ustar00zuulzuul00000000000000{ "job": { "job_configs": { "configs": { "mapred.reduce.tasks": "1", "mapred.map.tasks": "1" }, "args": [ "arg1", "arg2" ], "params": { "param2": "value2", "param1": "value1" } }, "is_protected": false, "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", "job_id": "310b0fc6-e1db-408e-8798-312e7500f3ac", "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", "created_at": "2015-09-15T09:49:24", "end_time": "2015-09-15T12:50:46", "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", "is_public": false, "updated_at": "2015-09-15T09:50:46", "return_code": null, "data_source_urls": { "3e1bc8e6-8c69-4749-8e52-90d9341d15bc": "swift://ap-cont/input", "52146b52-6540-4aac-a024-fee253cf52a9": "swift://ap-cont/output" }, "tenant_id": "808d5032ea0446889097723bfc8e919d", "start_time": "2015-09-15T12:49:43", "id": "20da9edb-12ce-4b45-a473-41baeefef997", "oozie_job_id": "0000001-150915094349962-oozie-hado-W", "info": { "user": "hadoop", "actions": [ { "name": ":start:", "trackerUri": "-", "externalStatus": "OK", "status": "OK", "externalId": "-", "transition": "job-node", "data": null, "endTime": "Tue, 15 Sep 2015 09:49:59 GMT", "errorCode": null, "id": "0000001-150915094349962-oozie-hado-W@:start:", "consoleUrl": "-", "errorMessage": null, "toString": "Action name[:start:] status[OK]", "stats": null, "type": ":START:", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "externalChildIDs": null, "cred": "null" }, { "name": "job-node", "trackerUri": "http://172.18.168.119:8032", "externalStatus": "FAILED/KILLED", "status": "ERROR", "externalId": "job_1442310173665_0002", "transition": "fail", "data": null, "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "errorCode": "JA018", "id": "0000001-150915094349962-oozie-hado-W@job-node", "consoleUrl": "http://ap-cluster-all-0:8088/proxy/application_1442310173665_0002/", "errorMessage": "Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]", "toString": "Action name[job-node] status[ERROR]", "stats": null, "type": "pig", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "externalChildIDs": null, "cred": "null" }, { "name": "fail", "trackerUri": "-", "externalStatus": "OK", "status": "OK", "externalId": 
"-", "transition": null, "data": null, "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "errorCode": "E0729", "id": "0000001-150915094349962-oozie-hado-W@fail", "consoleUrl": "-", "errorMessage": "Workflow failed, error message[Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]]", "toString": "Action name[fail] status[OK]", "stats": null, "type": ":KILL:", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:50:17 GMT", "externalChildIDs": null, "cred": "null" } ], "createdTime": "Tue, 15 Sep 2015 09:49:58 GMT", "status": "KILLED", "group": null, "externalId": null, "acl": null, "run": 0, "appName": "job-wf", "parentId": null, "conf": "\r\n \r\n user.name\r\n hadoop\r\n \r\n \r\n oozie.use.system.libpath\r\n true\r\n \r\n \r\n mapreduce.job.user.name\r\n hadoop\r\n \r\n \r\n nameNode\r\n hdfs://ap-cluster-all-0:9000\r\n \r\n \r\n jobTracker\r\n http://172.18.168.119:8032\r\n \r\n \r\n oozie.wf.application.path\r\n hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml\r\n \r\n", "id": "0000001-150915094349962-oozie-hado-W", "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "appPath": "hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml", "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "toString": "Workflow id[0000001-150915094349962-oozie-hado-W] status[KILLED]", "lastModTime": "Tue, 15 Sep 2015 09:50:17 GMT", "consoleUrl": "http://ap-cluster-all-0.novalocal:11000/oozie?job=0000001-150915094349962-oozie-hado-W" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/jobs/job-request.json0000664000175000017500000000102700000000000023766 0ustar00zuulzuul00000000000000{ "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", "job_template_id": "548ea8d4-a5sd-33a4-bt22-asf4n87a8e2dh", "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", "job_configs": { "configs": { "mapred.map.tasks": "1", "mapred.reduce.tasks": "1" }, "args": [ "arg1", "arg2" ], "params": { "param2": "value2", "param1": "value1" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/jobs/job-response.json0000664000175000017500000000157500000000000024144 0ustar00zuulzuul00000000000000{ "job": { "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", "is_protected": false, "job_id": "310b0fc6-e1db-408e-8798-312e7500f3ac", "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", "created_at": "2015-09-15T09:49:24", "is_public": false, "id": "20da9edb-12ce-4b45-a473-41baeefef997", "project_id": "808d5032ea0446889097723bfc8e919d", "job_configs": { "configs": { "mapred.reduce.tasks": "1", "mapred.map.tasks": "1" }, "args": [ "arg1", "arg2" ], "params": { "param2": "value2", "param1": "value1" } }, "info": { "status": "PENDING" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/jobs/job-update-request.json0000664000175000017500000000003200000000000025241 0ustar00zuulzuul00000000000000{ "is_public": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/jobs/job-update-response.json0000664000175000017500000001345400000000000025423 
0ustar00zuulzuul00000000000000{ "job: { "job_configs": { "configs": { "mapred.reduce.tasks": "1", "mapred.map.tasks": "1" }, "args": [ "arg1", "arg2" ], "params": { "param2": "value2", "param1": "value1" } }, "is_protected": false, "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", "job_id": "310b0fc6-e1db-408e-8798-312e7500f3ac", "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", "created_at": "2015-09-15T09:49:24", "end_time": "2015-09-15T12:50:46", "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", "is_public": true, "updated_at": "2015-09-15T09:50:46", "return_code": null, "data_source_urls": { "3e1bc8e6-8c69-4749-8e52-90d9341d15bc": "swift://ap-cont/input", "52146b52-6540-4aac-a024-fee253cf52a9": "swift://ap-cont/output" }, "tenant_id": "808d5032ea0446889097723bfc8e919d", "start_time": "2015-09-15T12:49:43", "id": "20da9edb-12ce-4b45-a473-41baeefef997", "oozie_job_id": "0000001-150915094349962-oozie-hado-W", "info": { "user": "hadoop", "actions": [ { "name": ":start:", "trackerUri": "-", "externalStatus": "OK", "status": "OK", "externalId": "-", "transition": "job-node", "data": null, "endTime": "Tue, 15 Sep 2015 09:49:59 GMT", "errorCode": null, "id": "0000001-150915094349962-oozie-hado-W@:start:", "consoleUrl": "-", "errorMessage": null, "toString": "Action name[:start:] status[OK]", "stats": null, "type": ":START:", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "externalChildIDs": null, "cred": "null" }, { "name": "job-node", "trackerUri": "http://172.18.168.119:8032", "externalStatus": "FAILED/KILLED", "status": "ERROR", "externalId": "job_1442310173665_0002", "transition": "fail", "data": null, "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "errorCode": "JA018", "id": "0000001-150915094349962-oozie-hado-W@job-node", "consoleUrl": "http://ap-cluster-all-0:8088/proxy/application_1442310173665_0002/", "errorMessage": "Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]", "toString": "Action name[job-node] status[ERROR]", "stats": null, "type": "pig", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "externalChildIDs": null, "cred": "null" }, { "name": "fail", "trackerUri": "-", "externalStatus": "OK", "status": "OK", "externalId": "-", "transition": null, "data": null, "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "errorCode": "E0729", "id": "0000001-150915094349962-oozie-hado-W@fail", "consoleUrl": "-", "errorMessage": "Workflow failed, error message[Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]]", "toString": "Action name[fail] status[OK]", "stats": null, "type": ":KILL:", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:50:17 GMT", "externalChildIDs": null, "cred": "null" } ], "createdTime": "Tue, 15 Sep 2015 09:49:58 GMT", "status": "KILLED", "group": null, "externalId": null, "acl": null, "run": 0, "appName": "job-wf", "parentId": null, "conf": "\r\n \r\n user.name\r\n hadoop\r\n \r\n \r\n oozie.use.system.libpath\r\n true\r\n \r\n \r\n mapreduce.job.user.name\r\n hadoop\r\n \r\n \r\n nameNode\r\n hdfs://ap-cluster-all-0:9000\r\n \r\n \r\n jobTracker\r\n http://172.18.168.119:8032\r\n \r\n \r\n oozie.wf.application.path\r\n hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml\r\n \r\n", "id": "0000001-150915094349962-oozie-hado-W", "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "appPath": "hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml", "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "toString": "Workflow 
id[0000001-150915094349962-oozie-hado-W] status[KILLED]", "lastModTime": "Tue, 15 Sep 2015 09:50:17 GMT", "consoleUrl": "http://ap-cluster-all-0.novalocal:11000/oozie?job=0000001-150915094349962-oozie-hado-W" } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/jobs/list-response.json0000664000175000017500000001442300000000000024341 0ustar00zuulzuul00000000000000{ "jobs": [ { "job_configs": { "configs": { "mapred.reduce.tasks": "1", "mapred.map.tasks": "1" }, "args": [ "arg1", "arg2" ], "params": { "param2": "value2", "param1": "value1" } }, "is_protected": false, "input_id": "3e1bc8e6-8c69-4749-8e52-90d9341d15bc", "job_id": "310b0fc6-e1db-408e-8798-312e7500f3ac", "cluster_id": "811e1134-666f-4c48-bc92-afb5b10c9d8c", "created_at": "2015-09-15T09:49:24", "end_time": "2015-09-15T12:50:46", "output_id": "52146b52-6540-4aac-a024-fee253cf52a9", "is_public": false, "updated_at": "2015-09-15T09:50:46", "return_code": null, "data_source_urls": { "3e1bc8e6-8c69-4749-8e52-90d9341d15bc": "swift://ap-cont/input", "52146b52-6540-4aac-a024-fee253cf52a9": "swift://ap-cont/output" }, "tenant_id": "808d5032ea0446889097723bfc8e919d", "start_time": "2015-09-15T12:49:43", "id": "20da9edb-12ce-4b45-a473-41baeefef997", "oozie_job_id": "0000001-150915094349962-oozie-hado-W", "info": { "user": "hadoop", "actions": [ { "name": ":start:", "trackerUri": "-", "externalStatus": "OK", "status": "OK", "externalId": "-", "transition": "job-node", "data": null, "endTime": "Tue, 15 Sep 2015 09:49:59 GMT", "errorCode": null, "id": "0000001-150915094349962-oozie-hado-W@:start:", "consoleUrl": "-", "errorMessage": null, "toString": "Action name[:start:] status[OK]", "stats": null, "type": ":START:", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "externalChildIDs": null, "cred": "null" }, { "name": "job-node", "trackerUri": "http://172.18.168.119:8032", "externalStatus": "FAILED/KILLED", "status": "ERROR", "externalId": "job_1442310173665_0002", "transition": "fail", "data": null, "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "errorCode": "JA018", "id": "0000001-150915094349962-oozie-hado-W@job-node", "consoleUrl": "http://ap-cluster-all-0:8088/proxy/application_1442310173665_0002/", "errorMessage": "Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]", "toString": "Action name[job-node] status[ERROR]", "stats": null, "type": "pig", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "externalChildIDs": null, "cred": "null" }, { "name": "fail", "trackerUri": "-", "externalStatus": "OK", "status": "OK", "externalId": "-", "transition": null, "data": null, "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "errorCode": "E0729", "id": "0000001-150915094349962-oozie-hado-W@fail", "consoleUrl": "-", "errorMessage": "Workflow failed, error message[Main class [org.apache.oozie.action.hadoop.PigMain], exit code [2]]", "toString": "Action name[fail] status[OK]", "stats": null, "type": ":KILL:", "retries": 0, "startTime": "Tue, 15 Sep 2015 09:50:17 GMT", "externalChildIDs": null, "cred": "null" } ], "createdTime": "Tue, 15 Sep 2015 09:49:58 GMT", "status": "KILLED", "group": null, "externalId": null, "acl": null, "run": 0, "appName": "job-wf", "parentId": null, "conf": "\r\n \r\n user.name\r\n hadoop\r\n \r\n \r\n oozie.use.system.libpath\r\n true\r\n \r\n \r\n mapreduce.job.user.name\r\n hadoop\r\n \r\n \r\n nameNode\r\n hdfs://ap-cluster-all-0:9000\r\n \r\n \r\n jobTracker\r\n http://172.18.168.119:8032\r\n 
\r\n \r\n oozie.wf.application.path\r\n hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml\r\n \r\n", "id": "0000001-150915094349962-oozie-hado-W", "startTime": "Tue, 15 Sep 2015 09:49:59 GMT", "appPath": "hdfs://ap-cluster-all-0:9000/user/hadoop/pig-job-example/3038025d-9974-4993-a778-26a074cdfb8d/workflow.xml", "endTime": "Tue, 15 Sep 2015 09:50:17 GMT", "toString": "Workflow id[0000001-150915094349962-oozie-hado-W] status[KILLED]", "lastModTime": "Tue, 15 Sep 2015 09:50:17 GMT", "consoleUrl": "http://ap-cluster-all-0.novalocal:11000/oozie?job=0000001-150915094349962-oozie-hado-W" } } ] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.641891 sahara-16.0.0/api-ref/source/v2/samples/node-group-templates/0000775000175000017500000000000000000000000023751 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/node-group-templates/node-group-template-create-request.json0000664000175000017500000000044300000000000033464 0ustar00zuulzuul00000000000000{ "plugin_name": "vanilla", "plugin_version": "2.7.1", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "name": "master", "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "flavor_id": "2" } ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=sahara-16.0.0/api-ref/source/v2/samples/node-group-templates/node-group-template-create-response.json 22 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/node-group-templates/node-group-template-create-response.jso0000664000175000017500000000201400000000000033450 0ustar00zuulzuul00000000000000{ "node_group_template": { "is_public": false, "tenant_id": "808d5032ea0446889097723bfc8e919d", "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "is_default": false, "availability_zone": null, "plugin_name": "vanilla", "is_protected": false, "flavor_id": "2", "id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "plugin_version": "2.7.1", "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:20:11", "security_groups": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "master", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/node-group-templates/node-group-template-show-response.json0000664000175000017500000000216700000000000033354 0ustar00zuulzuul00000000000000{ "node_group_template": { "is_public": false, "image_id": null, "tenant_id": "808d5032ea0446889097723bfc8e919d", "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "is_default": false, "availability_zone": null, "plugin_name": "vanilla", "flavor_id": "2", "id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "description": null, "plugin_version": "2.7.1", "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:20:11", "is_protected": false, "updated_at": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "master", "volume_mount_prefix": 
"/volumes/disk", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "volumes_size": 0, "volume_local_to_instance": false, "security_groups": null, "volume_type": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/node-group-templates/node-group-template-update-request.json0000664000175000017500000000033300000000000033501 0ustar00zuulzuul00000000000000{ "plugin_name": "vanilla", "plugin_version": "2.7.1", "node_processes": [ "datanode" ], "name": "new", "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "flavor_id": "2" } ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=sahara-16.0.0/api-ref/source/v2/samples/node-group-templates/node-group-template-update-response.json 22 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/node-group-templates/node-group-template-update-response.jso0000664000175000017500000000167000000000000033476 0ustar00zuulzuul00000000000000{ "node_group_template": { "is_public": false, "tenant_id": "808d5032ea0446889097723bfc8e919d", "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "is_default": false, "availability_zone": null, "plugin_name": "vanilla", "is_protected": false, "flavor_id": "2", "id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "plugin_version": "2.7.1", "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:20:11", "security_groups": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "new", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "datanode" ], "volumes_size": 0, "volume_local_to_instance": false, "volume_type": null } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/node-group-templates/node-group-templates-list-response.json0000664000175000017500000000510000000000000033520 0ustar00zuulzuul00000000000000{ "node_group_templates": [ { "is_public": false, "image_id": null, "tenant_id": "808d5032ea0446889097723bfc8e919d", "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "is_default": false, "availability_zone": null, "plugin_name": "vanilla", "flavor_id": "2", "id": "0bb9f1a4-0c44-4dc5-9452-6741c62ed9ae", "description": null, "plugin_version": "2.7.1", "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:20:11", "is_protected": false, "updated_at": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "master", "volume_mount_prefix": "/volumes/disk", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "volumes_size": 0, "volume_local_to_instance": false, "security_groups": null, "volume_type": null }, { "is_public": false, "image_id": null, "tenant_id": "808d5032ea0446889097723bfc8e919d", "shares": null, "floating_ip_pool": "033debed-aeb8-488c-b7d0-adb74c61faa5", "node_configs": {}, "auto_security_group": false, "is_default": false, "availability_zone": null, "plugin_name": "vanilla", "flavor_id": "2", "id": "846edb31-add5-46e6-a4ee-a4c339f99251", "description": null, "hadoop_version": "2.7.1", "use_autoconfig": true, "volumes_availability_zone": null, "created_at": "2015-09-14T10:27:00", "is_protected": false, "updated_at": null, "volumes_per_node": 0, "is_proxy_gateway": false, "name": "worker", 
"volume_mount_prefix": "/volumes/disk", "node_processes": [ "datanode", "nodemanager" ], "volumes_size": 0, "volume_local_to_instance": false, "security_groups": null, "volume_type": null } ], "markers": { "prev":"39dfc852-8588-4b61-8d2b-eb08a67ab240", "next":"eaa0bd97-ab54-43df-83ab-77a9774d7358" } } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.641891 sahara-16.0.0/api-ref/source/v2/samples/plugins/0000775000175000017500000000000000000000000021357 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/plugins/plugin-show-response.json0000664000175000017500000000060000000000000026356 0ustar00zuulzuul00000000000000{ "plugin": { "name": "vanilla", "versions": [ "1.2.1", "2.4.1", "2.6.0" ], "title": "Vanilla Apache Hadoop", "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component." } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/plugins/plugin-update-request.json0000664000175000017500000000013400000000000026514 0ustar00zuulzuul00000000000000{ "plugin_labels": { "enabled": { "status": false } } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/plugins/plugin-update-response.json0000664000175000017500000000172200000000000026666 0ustar00zuulzuul00000000000000{ "plugin": { "plugin_labels": { "hidden": { "status": true, "mutable": true, "description": "Existence of plugin or its version is hidden, but still can be used for cluster creation by CLI and directly by client." }, "enabled": { "status": false, "mutable": true, "description": "Plugin or its version is enabled and can be used by user." } }, "description": "It's a fake plugin that aimed to work on the CirrOS images. It doesn't install Hadoop. It's needed to be able to test provisioning part of Sahara codebase itself.", "versions": [ "0.1" ], "tenant_id": "993f53c1f51845e48e013aeb632358d8", "title": "Fake Plugin", "version_labels": { "0.1": { "enabled": { "status": true, "mutable": true, "description": "Plugin or its version is enabled and can be used by user." } } }, "name": "fake" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/plugins/plugin-version-show-response.json0000664000175000017500000000552700000000000030056 0ustar00zuulzuul00000000000000{ "plugin": { "name": "vanilla", "versions": [ "1.2.1", "2.4.1", "2.6.0" ], "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", "required_image_tags": [ "vanilla", "2.6.0" ], "node_processes": { "JobFlow": [ "oozie" ], "HDFS": [ "namenode", "datanode", "secondarynamenode" ], "YARN": [ "resourcemanager", "nodemanager" ], "MapReduce": [ "historyserver" ], "Hadoop": [], "Hive": [ "hiveserver" ] }, "configs": [ { "default_value": "/tmp/hadoop-${user.name}", "name": "hadoop.tmp.dir", "priority": 2, "config_type": "string", "applicable_target": "HDFS", "is_optional": true, "scope": "node", "description": "A base for other temporary directories." 
}, { "default_value": true, "name": "hadoop.native.lib", "priority": 2, "config_type": "bool", "applicable_target": "HDFS", "is_optional": true, "scope": "node", "description": "Should native hadoop libraries, if present, be used." }, { "default_value": 1024, "name": "NodeManager Heap Size", "config_values": null, "priority": 1, "config_type": "int", "applicable_target": "YARN", "is_optional": false, "scope": "node", "description": null }, { "default_value": true, "name": "Enable Swift", "config_values": null, "priority": 1, "config_type": "bool", "applicable_target": "general", "is_optional": false, "scope": "cluster", "description": null }, { "default_value": true, "name": "Enable MySQL", "config_values": null, "priority": 1, "config_type": "bool", "applicable_target": "general", "is_optional": true, "scope": "cluster", "description": null } ], "title": "Vanilla Apache Hadoop" } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/api-ref/source/v2/samples/plugins/plugins-list-response.json0000664000175000017500000000261700000000000026546 0ustar00zuulzuul00000000000000{ "plugins": [ { "name": "vanilla", "description": "The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any management consoles. It can also deploy the Oozie component.", "versions": [ "1.2.1", "2.4.1", "2.6.0" ], "title": "Vanilla Apache Hadoop" }, { "name": "hdp", "description": "The Hortonworks Sahara plugin automates the deployment of the Hortonworks Data Platform (HDP) on OpenStack.", "versions": [ "1.3.2", "2.0.6" ], "title": "Hortonworks Data Platform" }, { "name": "spark", "description": "This plugin provides an ability to launch Spark on Hadoop CDH cluster without any management consoles.", "versions": [ "1.0.0", "0.9.1" ], "title": "Apache Spark" }, { "name": "cdh", "description": "The Cloudera Sahara plugin provides the ability to launch the Cloudera distribution of Apache Hadoop (CDH) with Cloudera Manager management console.", "versions": [ "5", "5.3.0" ], "title": "Cloudera Plugin" } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/bandit.yaml0000664000175000017500000001174300000000000015213 0ustar00zuulzuul00000000000000# optional: after how many files to update progress #show_progress_every: 100 # optional: plugins directory name #plugins_dir: 'plugins' # optional: plugins discovery name pattern plugin_name_pattern: '*.py' # optional: terminal escape sequences to display colors #output_colors: # DEFAULT: '\033[0m' # HEADER: '\033[95m' # LOW: '\033[94m' # WARN: '\033[93m' # ERROR: '\033[91m' # optional: log format string #log_format: "[%(module)s]\t%(levelname)s\t%(message)s" # globs of files which should be analyzed include: - '*.py' - '*.pyw' # a list of strings, which if found in the path will cause files to be excluded # for example /tests/ - to remove all files in tests directory exclude_dirs: profiles: sahara_default: include: - hardcoded_password_string - hardcoded_password_funcarg # - hardcoded_password_default - blacklist_calls - blacklist_imports - subprocess_popen_with_shell_equals_true - subprocess_without_shell_equals_true - any_other_function_with_shell_equals_true - start_process_with_a_shell - start_process_with_no_shell - hardcoded_sql_expressions - jinja2_autoescape_false - use_of_mako_templates blacklist_calls: bad_name_sets: - pickle: qualnames: [pickle.loads, pickle.load, pickle.Unpickler, 
cPickle.loads, cPickle.load, cPickle.Unpickler] message: "Pickle library appears to be in use, possible security issue." - marshal: qualnames: [marshal.load, marshal.loads] message: "Deserialization with the marshal module is possibly dangerous." - md5: qualnames: [hashlib.md5] message: "Use of insecure MD5 hash function." - mktemp_q: qualnames: [tempfile.mktemp] message: "Use of insecure and deprecated function (mktemp)." - eval: qualnames: [eval] message: "Use of possibly insecure function - consider using safer ast.literal_eval." - mark_safe: qualnames: [mark_safe] message: "Use of mark_safe() may expose cross-site scripting vulnerabilities and should be reviewed." - httpsconnection: qualnames: [httplib.HTTPSConnection] message: "Use of HTTPSConnection does not provide security, see https://wiki.openstack.org/wiki/OSSN/OSSN-0033" - yaml_load: qualnames: [yaml.load] message: "Use of unsafe yaml load. Allows instantiation of arbitrary objects. Consider yaml.safe_load()." - urllib_urlopen: qualnames: [urllib.urlopen, urllib.urlretrieve, urllib.URLopener, urllib.FancyURLopener, urllib2.urlopen, urllib2.Request] message: "Audit url open for permitted schemes. Allowing use of file:/ or custom schemes is often unexpected." shell_injection: # Start a process using the subprocess module, or one of its wrappers. subprocess: [subprocess.Popen, subprocess.call, subprocess.check_call, subprocess.check_output, utils.execute, utils.execute_with_timeout] # Start a process with a function vulnerable to shell injection. shell: [os.system, os.popen, os.popen2, os.popen3, os.popen4, popen2.popen2, popen2.popen3, popen2.popen4, popen2.Popen3, popen2.Popen4, commands.getoutput, commands.getstatusoutput] # Start a process with a function that is not vulnerable to shell injection. no_shell: [os.execl, os.execle, os.execlp, os.execlpe, os.execv,os.execve, os.execvp, os.execvpe, os.spawnl, os.spawnle, os.spawnlp, os.spawnlpe, os.spawnv, os.spawnve, os.spawnvp, os.spawnvpe, os.startfile] blacklist_imports: bad_import_sets: - telnet: imports: [telnetlib] level: ERROR message: "Telnet is considered insecure. Use SSH or some other encrypted protocol." - info_libs: imports: [pickle, cPickle, subprocess, Crypto] level: LOW message: "Consider possible security implications associated with {module} module." 
hardcoded_tmp_directory: tmp_dirs: [/tmp, /var/tmp, /dev/shm] hardcoded_password: word_list: "wordlist/default-passwords" ssl_with_bad_version: bad_protocol_versions: - 'PROTOCOL_SSLv2' - 'SSLv2_METHOD' - 'SSLv23_METHOD' - 'PROTOCOL_SSLv3' # strict option - 'PROTOCOL_TLSv1' # strict option - 'SSLv3_METHOD' # strict option - 'TLSv1_METHOD' # strict option password_config_option_not_marked_secret: function_names: - oslo.config.cfg.StrOpt - oslo_config.cfg.StrOpt execute_with_run_as_root_equals_true: function_names: - ceilometer.utils.execute - cinder.utils.execute - neutron.agent.linux.utils.execute - nova.utils.execute - nova.utils.trycmd try_except_pass: check_typed_exception: True ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/bindep.txt0000664000175000017500000000157500000000000015072 0ustar00zuulzuul00000000000000# This file contains runtime (non-python) dependencies # More info at: https://docs.openstack.org/infra/bindep/readme.html libssl-dev [platform:dpkg] openssl-devel [platform:rpm] # updates of the localized release notes require msgmerge gettext # Define the basic (test) requirements extracted from bindata-fallback.txt # - mysqladmin and psql mariadb [platform:rpm] mariadb-devel [platform:rpm] mariadb-server [platform:rpm] dev-db/mariadb [platform:gentoo] mysql-client [platform:dpkg] mysql-server [platform:dpkg] postgresql postgresql-client [platform:dpkg] libpq-dev [platform:dpkg] postgresql-server [platform:rpm] postgresql-devel [platform:rpm] # The Python binding for libguestfs are used by the sahara-image-pack # command. python3-guestfs [platform:dpkg] libguestfs-xfs [platform:dpkg] python3-libguestfs [platform:rpm] libguestfs-xfs [platform:redhat] xfsprogs [platform:suse] ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.641891 sahara-16.0.0/devstack/0000775000175000017500000000000000000000000014664 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/devstack/README.rst0000664000175000017500000000100300000000000016345 0ustar00zuulzuul00000000000000====================== Enabling in Devstack ====================== 1. Download DevStack 2. Add this repo as an external repository in ``local.conf`` .. sourcecode:: bash [[local|localrc]] enable_plugin sahara https://opendev.org/openstack/sahara enable_plugin heat https://opendev.org/openstack/heat Optionally, a git refspec may be provided as follows: .. sourcecode:: bash [[local|localrc]] enable_plugin sahara https://opendev.org/openstack/sahara 3. run ``stack.sh`` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/devstack/exercise.sh0000664000175000017500000000270100000000000017027 0ustar00zuulzuul00000000000000#!/usr/bin/env bash # Sanity check that Sahara started if enabled echo "*********************************************************************" echo "Begin DevStack Exercise: $0" echo "*********************************************************************" # This script exits on an error so that errors don't compound and you see # only the first error that occurred. set -o errexit # Print the commands being run so that we can see the command that triggers # an error. It is also useful for following allowing as the install occurs. 
set -o xtrace # Settings # ======== # Keep track of the current directory EXERCISE_DIR=$(cd $(dirname "$0") && pwd) TOP_DIR=$(cd $EXERCISE_DIR/..; pwd) # Import common functions . $TOP_DIR/functions # Import configuration . $TOP_DIR/openrc # Import exercise configuration . $TOP_DIR/exerciserc is_service_enabled sahara || exit 55 if is_ssl_enabled_service "sahara" ||\ is_ssl_enabled_service "sahara-api" ||\ is_service_enabled tls-proxy; then SAHARA_SERVICE_PROTOCOL="https" fi SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} $CURL_GET $SAHARA_SERVICE_PROTOCOL://$SERVICE_HOST:8386/ 2>/dev/null \ | grep -q 'Auth' || die $LINENO "Sahara API isn't functioning!" set +o xtrace echo "*********************************************************************" echo "SUCCESS: End DevStack Exercise: $0" echo "*********************************************************************" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.641891 sahara-16.0.0/devstack/files/0000775000175000017500000000000000000000000015766 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/devstack/files/apache-sahara-api.template0000664000175000017500000000136400000000000022754 0ustar00zuulzuul00000000000000Listen %PUBLICPORT% WSGIDaemonProcess sahara-api processes=2 threads=1 user=%USER% display-name=%{GROUP} %VIRTUALENV% WSGIProcessGroup sahara-api WSGIScriptAlias / %SAHARA_BIN_DIR%/sahara-wsgi-api WSGIApplicationGroup %{GLOBAL} WSGIPassAuthorization On AllowEncodedSlashes On = 2.4> ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/%APACHE_NAME%/sahara-api.log %SSLENGINE% %SSLCERTFILE% %SSLKEYFILE% = 2.4> Require all granted Order allow,deny Allow from all ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/devstack/plugin.sh0000775000175000017500000002630000000000000016522 0ustar00zuulzuul00000000000000#!/bin/bash # # lib/sahara # Dependencies: # ``functions`` file # ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined # ``stack.sh`` calls the entry points in this order: # # install_sahara # install_python_saharaclient # configure_sahara # start_sahara # stop_sahara # cleanup_sahara # Save trace setting XTRACE=$(set +o | grep xtrace) set -o xtrace # Functions # --------- # create_sahara_accounts() - Set up common required sahara accounts # # Tenant User Roles # ------------------------------ # service sahara admin function create_sahara_accounts { create_service_user "sahara" get_or_create_service "sahara" "data-processing" "Sahara Data Processing" get_or_create_endpoint "data-processing" \ "$REGION_NAME" \ "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT" \ "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT" \ "$SAHARA_SERVICE_PROTOCOL://$SAHARA_SERVICE_HOST:$SAHARA_SERVICE_PORT" } # cleanup_sahara() - Remove residual data files, anything left over from # previous runs that would need to clean up. 
function cleanup_sahara { # Cleanup auth cache dir if [ "$SAHARA_USE_MOD_WSGI" == "True" ]; then sudo rm -f $(apache_site_config_for sahara-api) fi } function configure_sahara_apache_wsgi { local sahara_apache_conf=$(apache_site_config_for sahara-api) local sahara_ssl="" local sahara_certfile="" local sahara_keyfile="" local venv_path="" if is_ssl_enabled_service sahara; then sahara_ssl="SSLEngine On" sahara_certfile="SSLCertificateFile $SAHARA_SSL_CERT" sahara_keyfile="SSLCertificateKeyFile $SAHARA_SSL_KEY" fi sudo cp $SAHARA_DIR/devstack/files/apache-sahara-api.template $sahara_apache_conf sudo sed -e " s|%PUBLICPORT%|$SAHARA_SERVICE_PORT|g; s|%APACHE_NAME%|$APACHE_NAME|g; s|%SAHARA_BIN_DIR%|$SAHARA_BIN_DIR|g; s|%SSLENGINE%|$sahara_ssl|g; s|%SSLCERTFILE%|$sahara_certfile|g; s|%SSLKEYFILE%|$sahara_keyfile|g; s|%USER%|$STACK_USER|g; s|%VIRTUALENV%|$venv_path|g " -i $sahara_apache_conf } # configure_sahara() - Set config files, create data dirs, etc function configure_sahara { sudo install -d -o $STACK_USER $SAHARA_CONF_DIR cp -p $SAHARA_DIR/etc/sahara/api-paste.ini $SAHARA_CONF_DIR configure_keystone_authtoken_middleware $SAHARA_CONF_FILE sahara # Set admin user parameters needed for trusts creation iniset $SAHARA_CONF_FILE \ trustee project_name $SERVICE_TENANT_NAME iniset $SAHARA_CONF_FILE trustee username sahara iniset $SAHARA_CONF_FILE \ trustee password $SERVICE_PASSWORD iniset $SAHARA_CONF_FILE \ trustee user_domain_name "$SERVICE_DOMAIN_NAME" iniset $SAHARA_CONF_FILE \ trustee project_domain_name "$SERVICE_DOMAIN_NAME" iniset $SAHARA_CONF_FILE \ trustee auth_url "$KEYSTONE_SERVICE_URI/v3" iniset_rpc_backend sahara $SAHARA_CONF_FILE DEFAULT # Set configuration to send notifications if is_service_enabled ceilometer; then iniset $SAHARA_CONF_FILE oslo_messaging_notifications driver "messaging" fi iniset $SAHARA_CONF_FILE DEFAULT debug $ENABLE_DEBUG_LOG_LEVEL iniset $SAHARA_CONF_FILE DEFAULT plugins $SAHARA_ENABLED_PLUGINS iniset $SAHARA_CONF_FILE \ database connection `database_connection_url sahara` if is_service_enabled neutron; then iniset $SAHARA_CONF_FILE neutron endpoint_type $SAHARA_ENDPOINT_TYPE if is_ssl_enabled_service "neutron" \ || is_service_enabled tls-proxy; then iniset $SAHARA_CONF_FILE neutron ca_file $SSL_BUNDLE_FILE fi fi if is_ssl_enabled_service "heat" || is_service_enabled tls-proxy; then iniset $SAHARA_CONF_FILE heat ca_file $SSL_BUNDLE_FILE fi iniset $SAHARA_CONF_FILE heat endpoint_type $SAHARA_ENDPOINT_TYPE if is_ssl_enabled_service "cinder" || is_service_enabled tls-proxy; then iniset $SAHARA_CONF_FILE cinder ca_file $SSL_BUNDLE_FILE fi iniset $SAHARA_CONF_FILE cinder endpoint_type $SAHARA_ENDPOINT_TYPE if is_ssl_enabled_service "nova" || is_service_enabled tls-proxy; then iniset $SAHARA_CONF_FILE nova ca_file $SSL_BUNDLE_FILE fi iniset $SAHARA_CONF_FILE nova endpoint_type $SAHARA_ENDPOINT_TYPE if is_ssl_enabled_service "swift" || is_service_enabled tls-proxy; then iniset $SAHARA_CONF_FILE swift ca_file $SSL_BUNDLE_FILE fi iniset $SAHARA_CONF_FILE swift endpoint_type $SAHARA_ENDPOINT_TYPE if is_ssl_enabled_service "key" || is_service_enabled tls-proxy; then iniset $SAHARA_CONF_FILE keystone ca_file $SSL_BUNDLE_FILE fi iniset $SAHARA_CONF_FILE keystone endpoint_type $SAHARA_ENDPOINT_TYPE if is_ssl_enabled_service "glance" || is_service_enabled tls-proxy; then iniset $SAHARA_CONF_FILE glance ca_file $SSL_BUNDLE_FILE fi iniset $SAHARA_CONF_FILE glance endpoint_type $SAHARA_ENDPOINT_TYPE # Register SSL certificates if provided if is_ssl_enabled_service sahara; 
then ensure_certificates SAHARA iniset $SAHARA_CONF_FILE ssl cert_file "$SAHARA_SSL_CERT" iniset $SAHARA_CONF_FILE ssl key_file "$SAHARA_SSL_KEY" fi iniset $SAHARA_CONF_FILE DEFAULT use_syslog $SYSLOG # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ]; then if [ "$SAHARA_USE_MOD_WSGI" == "False" ]; then setup_colorized_logging $SAHARA_CONF_FILE DEFAULT fi fi if is_service_enabled tls-proxy; then # Set the service port for a proxy to take the original iniset $SAHARA_CONF_FILE DEFAULT port $SAHARA_SERVICE_PORT_INT fi if [ "$SAHARA_ENABLE_DISTRIBUTED_PERIODICS" == "True" ]; then # Enable distributed periodic tasks iniset $SAHARA_CONF_FILE DEFAULT periodic_coordinator_backend_url\ $SAHARA_PERIODIC_COORDINATOR_URL pip_install tooz[memcached] restart_service memcached fi recreate_database sahara $SAHARA_BIN_DIR/sahara-db-manage \ --config-file $SAHARA_CONF_FILE upgrade head if [ "$SAHARA_USE_MOD_WSGI" == "True" ]; then configure_sahara_apache_wsgi fi } # install_sahara() - Collect source and prepare function install_sahara { setup_develop $SAHARA_DIR if [ "$SAHARA_USE_MOD_WSGI" == "True" ]; then install_apache_wsgi fi } # install_ambari() - Collect source and prepare function install_ambari { git_clone $AMBARI_PLUGIN_REPO $AMBARI_PLUGIN_DIR $AMBARI_PLUGIN_BRANCH setup_develop $AMBARI_PLUGIN_DIR } # install_cdh() - Collect source and prepare function install_cdh { git_clone $CDH_PLUGIN_REPO $CDH_PLUGIN_DIR $CDH_PLUGIN_BRANCH setup_develop $CDH_PLUGIN_DIR } # install_mapr() - Collect source and prepare function install_mapr { git_clone $MAPR_PLUGIN_REPO $MAPR_PLUGIN_DIR $MAPR_PLUGIN_BRANCH setup_develop $MAPR_PLUGIN_DIR } # install_spark() - Collect source and prepare function install_spark { git_clone $SPARK_PLUGIN_REPO $SPARK_PLUGIN_DIR $SPARK_PLUGIN_BRANCH setup_develop $SPARK_PLUGIN_DIR } # install_storm() - Collect source and prepare function install_storm { git_clone $STORM_PLUGIN_REPO $STORM_PLUGIN_DIR $STORM_PLUGIN_BRANCH setup_develop $STORM_PLUGIN_DIR } # install_vanilla() - Collect source and prepare function install_vanilla { git_clone $VANILLA_PLUGIN_REPO $VANILLA_PLUGIN_DIR $VANILLA_PLUGIN_BRANCH setup_develop $VANILLA_PLUGIN_DIR } # install_python_saharaclient() - Collect source and prepare function install_python_saharaclient { if use_library_from_git "python-saharaclient"; then git_clone $SAHARACLIENT_REPO $SAHARACLIENT_DIR $SAHARACLIENT_BRANCH setup_develop $SAHARACLIENT_DIR fi } # start_sahara() - Start running processes, including screen function start_sahara { local service_port=$SAHARA_SERVICE_PORT local service_protocol=$SAHARA_SERVICE_PROTOCOL if is_service_enabled tls-proxy; then service_port=$SAHARA_SERVICE_PORT_INT service_protocol="http" fi if [ "$SAHARA_USE_MOD_WSGI" == "True" ] ; then enable_apache_site sahara-api restart_apache_server else run_process sahara-api "$SAHARA_BIN_DIR/sahara-api \ --config-file $SAHARA_CONF_FILE" fi run_process sahara-eng "$SAHARA_BIN_DIR/sahara-engine \ --config-file $SAHARA_CONF_FILE" echo "Waiting for Sahara to start..." if ! 
wait_for_service $SERVICE_TIMEOUT \ $service_protocol://$SAHARA_SERVICE_HOST:$service_port; then die $LINENO "Sahara did not start" fi # Start proxies if enabled if is_service_enabled tls-proxy; then start_tls_proxy '*' $SAHARA_SERVICE_PORT \ $SAHARA_SERVICE_HOST \ $SAHARA_SERVICE_PORT_INT & fi } # configure_tempest_for_sahara() - Tune Tempest configuration for Sahara function configure_tempest_for_sahara { if is_service_enabled tempest; then iniset $TEMPEST_CONFIG service_available sahara True iniset $TEMPEST_CONFIG data-processing-feature-enabled plugins $SAHARA_INSTALLED_PLUGINS fi } # stop_sahara() - Stop running processes function stop_sahara { # Kill the Sahara screen windows if [ "$SAHARA_USE_MOD_WSGI" == "True" ]; then disable_apache_site sahara-api restart_apache_server else stop_process sahara-all stop_process sahara-api stop_process sahara-eng fi } # is_sahara_enabled. This allows is_service_enabled sahara work # correctly throughout devstack. function is_sahara_enabled { if is_service_enabled sahara-api || \ is_service_enabled sahara-eng; then return 0 else return 1 fi } function is_plugin_required { if [ "${SAHARA_INSTALLED_PLUGINS/$1}" = "$SAHARA_INSTALLED_PLUGINS" ] ; then return 1 else return 0 fi } # Dispatcher for Sahara plugin if is_service_enabled sahara; then if [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing sahara" install_sahara if is_plugin_required ambari; then install_ambari fi if is_plugin_required cdh; then install_cdh fi if is_plugin_required mapr; then install_mapr fi if is_plugin_required spark; then install_spark fi if is_plugin_required storm; then install_storm fi if is_plugin_required vanilla; then install_vanilla fi install_python_saharaclient cleanup_sahara elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring sahara" configure_sahara create_sahara_accounts elif [[ "$1" == "stack" && "$2" == "extra" ]]; then echo_summary "Initializing sahara" start_sahara elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then echo_summary "Configuring tempest" configure_tempest_for_sahara fi if [[ "$1" == "unstack" ]]; then stop_sahara fi if [[ "$1" == "clean" ]]; then cleanup_sahara fi fi # Restore xtrace $XTRACE # Local variables: # mode: shell-script # End: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/devstack/settings0000664000175000017500000000507400000000000016455 0ustar00zuulzuul00000000000000#!/bin/bash # Settings needed for the Sahara plugin # ------------------------------------- # Set up default directories SAHARACLIENT_DIR=$DEST/python-saharaclient SAHARA_DIR=$DEST/sahara AMBARI_PLUGIN_DIR=$DEST/sahara-plugin-ambari CDH_PLUGIN_DIR=$DEST/sahara-plugin-cdh MAPR_PLUGIN_DIR=$DEST/sahara-plugin-mapr SPARK_PLUGIN_DIR=$DEST/sahara-plugin-spark STORM_PLUGIN_DIR=$DEST/sahara-plugin-storm VANILLA_PLUGIN_DIR=$DEST/sahara-plugin-vanilla SAHARACLIENT_REPO=${SAHARACLIENT_REPO:-\ ${GIT_BASE}/openstack/python-saharaclient.git} SAHARACLIENT_BRANCH=${SAHARACLIENT_BRANCH:-master} AMBARI_PLUGIN_REPO=${AMBARI_PLUGIN_REPO:-https://opendev.org/openstack/sahara-plugin-ambari/} AMBARI_PLUGIN_BRANCH=${AMBARI_PLUGIN_BRANCH:-master} CDH_PLUGIN_REPO=${CDH_PLUGIN_REPO:-https://opendev.org/openstack/sahara-plugin-cdh/} CDH_PLUGIN_BRANCH=${CDH_PLUGIN_BRANCH:-master} MAPR_PLUGIN_REPO=${MAPR_PLUGIN_REPO:-https://opendev.org/openstack/sahara-plugin-mapr/} MAPR_PLUGIN_BRANCH=${MAPR_PLUGIN_BRANCH:-master} 
SPARK_PLUGIN_REPO=${SPARK_PLUGIN_REPO:-https://opendev.org/openstack/sahara-plugin-spark/} SPARK_PLUGIN_BRANCH=${SPARK_PLUGIN_BRANCH:-master} STORM_PLUGIN_REPO=${STORM_PLUGIN_REPO:-https://opendev.org/openstack/sahara-plugin-storm/} STORM_PLUGIN_BRANCH=${STORM_PLUGIN_BRANCH:-master} VANILLA_PLUGIN_REPO=${VANILLA_PLUGIN_REPO:-https://opendev.org/openstack/sahara-plugin-vanilla/} VANILLA_PLUGIN_BRANCH=${VANILLA_PLUGIN_BRANCH:-master} SAHARA_CONF_DIR=${SAHARA_CONF_DIR:-/etc/sahara} SAHARA_CONF_FILE=${SAHARA_CONF_DIR}/sahara.conf # TODO(slukjanov): Should we append sahara to SSL_ENABLED_SERVICES? if is_ssl_enabled_service "sahara" || is_service_enabled tls-proxy; then SAHARA_SERVICE_PROTOCOL="https" fi SAHARA_SERVICE_HOST=${SAHARA_SERVICE_HOST:-$SERVICE_HOST} SAHARA_SERVICE_PORT=${SAHARA_SERVICE_PORT:-8386} SAHARA_SERVICE_PORT_INT=${SAHARA_SERVICE_PORT_INT:-18386} SAHARA_SERVICE_PROTOCOL=${SAHARA_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} SAHARA_ENDPOINT_TYPE=${SAHARA_ENDPOINT_TYPE:-publicURL} SAHARA_ENABLED_PLUGINS=${SAHARA_ENABLED_PLUGINS:-\ vanilla,ambari,cdh,mapr,spark,storm,fake} SAHARA_INSTALLED_PLUGINS=${SAHARA_INSTALLED_PLUGINS:-\ vanilla,ambari,cdh,mapr,spark,storm,fake} SAHARA_BIN_DIR=$(get_python_exec_prefix) SAHARA_ENABLE_DISTRIBUTED_PERIODICS=${SAHARA_ENABLE_DISTRIBUTED_PERIODICS:-\ True} SAHARA_PERIODIC_COORDINATOR_URL=${SAHARA_PERIODIC_COORDINATOR_URL:-\ memcached://127.0.0.1:11211} #Toggle for deploying Sahara API with Apache + mod_wsgi SAHARA_USE_MOD_WSGI=${SAHARA_USE_MOD_WSGI:-True} enable_service sahara-api sahara-eng enable_service heat h-eng h-api h-api-cfn h-api-cw ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.641891 sahara-16.0.0/devstack/upgrade/0000775000175000017500000000000000000000000016313 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.641891 sahara-16.0.0/devstack/upgrade/from-liberty/0000775000175000017500000000000000000000000020726 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/devstack/upgrade/from-liberty/upgrade-sahara0000664000175000017500000000044600000000000023541 0ustar00zuulzuul00000000000000#!/usr/bin/env bash # ``upgrade-sahara`` function configure_sahara_upgrade { XTRACE=$(set +o | grep xtrace) set -o xtrace # Copy api-paste.ini to configuration directory cp -p $SAHARA_DIR/etc/sahara/api-paste.ini $SAHARA_CONF_DIR # reset to previous state $XTRACE } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.641891 sahara-16.0.0/devstack/upgrade/from-mitaka/0000775000175000017500000000000000000000000020522 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/devstack/upgrade/from-mitaka/upgrade-sahara0000775000175000017500000000054400000000000023337 0ustar00zuulzuul00000000000000#!/usr/bin/env bash # ``upgrade-sahara`` function configure_sahara_upgrade { XTRACE=$(set +o | grep xtrace) set -o xtrace local old_plugins old_plugins=$(cat $SAHARA_CONF_DIR/sahara.conf | grep ^plugins) sed -i.bak "s/$old_plugins/plugins=fake,vanilla,cdh/g" $SAHARA_CONF_DIR/sahara.conf # reset to previous state $XTRACE } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.641891 
sahara-16.0.0/devstack/upgrade/from-rocky/0000775000175000017500000000000000000000000020403 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/devstack/upgrade/from-rocky/upgrade-sahara0000775000175000017500000000043600000000000023220 0ustar00zuulzuul00000000000000#!/usr/bin/env bash # ``upgrade-sahara`` function configure_sahara_upgrade { XTRACE=$(set +o | grep xtrace) set -o xtrace install_ambari install_cdh install_mapr install_spark install_storm install_vanilla # reset to previous state $XTRACE } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/devstack/upgrade/resources.sh0000775000175000017500000001435500000000000020674 0ustar00zuulzuul00000000000000#!/bin/bash set -o errexit . $GRENADE_DIR/grenaderc . $GRENADE_DIR/functions . $TOP_DIR/openrc admin admin set -o xtrace SAHARA_USER=sahara_grenade SAHARA_PROJECT=sahara_grenade SAHARA_PASS=pass SAHARA_KEY=sahara_key SAHARA_KEY_FILE=$SAVE_DIR/sahara_key.pem PUBLIC_NETWORK_NAME=${PUBLIC_NETWORK_NAME:-public} # cirros image is not appropriate for cluster creation SAHARA_IMAGE_NAME=${SAHARA_IMAGE_NAME:-fedora-heat-test-image} SAHARA_IMAGE_USER=${SAHARA_IMAGE_USER:-fedora} # custom flavor parameters SAHARA_FLAVOR_NAME=${SAHARA_FLAVOR_NAME:-sahara_flavor} SAHARA_FLAVOR_RAM=${SAHARA_FLAVOR_RAM:-1024} SAHARA_FLAVOR_DISK=${SAHARA_FLAVOR_DISK:-10} NG_TEMPLATE_NAME=ng-template-grenade CLUSTER_TEMPLATE_NAME=cluster-template-grenade CLUSTER_NAME=cluster-grenade function sahara_set_user { # set ourselves to the created sahara user OS_TENANT_NAME=$SAHARA_PROJECT OS_PROJECT_NAME=$SAHARA_PROJECT OS_USERNAME=$SAHARA_USER OS_PASSWORD=$SAHARA_PASS } function create_tenant { # create a tenant for the server eval $(openstack project create -f shell -c id $SAHARA_PROJECT) if [[ -z "$id" ]]; then die $LINENO "Didn't create $SAHARA_PROJECT project" fi resource_save sahara project_id $id } function create_user { local project_id=$id eval $(openstack user create $SAHARA_USER \ --project $project_id \ --password $SAHARA_PASS \ -f shell -c id) if [[ -z "$id" ]]; then die $LINENO "Didn't create $SAHARA_USER user" fi resource_save sahara user_id $id # Workaround for bug: https://bugs.launchpad.net/keystone/+bug/1662911 openstack role add member --user $id --project $project_id } function create_keypair { # create key pair for access openstack keypair create $SAHARA_KEY > $SAHARA_KEY_FILE chmod 600 $SAHARA_KEY_FILE } function create_flavor { eval $(openstack flavor create -f shell -c id \ --ram $SAHARA_FLAVOR_RAM \ --disk $SAHARA_FLAVOR_DISK \ $SAHARA_FLAVOR_NAME) resource_save sahara flavor_id $id } function register_image { eval $(openstack image show \ -f shell -c id $SAHARA_IMAGE_NAME) resource_save sahara image_id $id openstack dataprocessing image register $id --username $SAHARA_IMAGE_USER openstack dataprocessing image tags set $id --tags fake 0.1 } function create_node_group_template { eval $(openstack network show -f shell -c id $PUBLIC_NETWORK_NAME) local public_net_id=$id local flavor_id=$(resource_get sahara flavor_id) openstack dataprocessing node group template create \ --name $NG_TEMPLATE_NAME \ --flavor $flavor_id \ --plugin fake \ --plugin-version 0.1 \ --processes jobtracker namenode tasktracker datanode \ --floating-ip-pool $public_net_id \ --auto-security-group } function create_cluster_template { openstack dataprocessing cluster template create \ --name $CLUSTER_TEMPLATE_NAME 
\ --node-groups $NG_TEMPLATE_NAME:1 } function create_cluster { local net_id=$(resource_get network net_id) local image_id=$(resource_get sahara image_id) if [[ -n "$net_id" ]]; then eval $(openstack dataprocessing cluster create \ --name $CLUSTER_NAME \ --cluster-template $CLUSTER_TEMPLATE_NAME \ --image $image_id \ --user-keypair $SAHARA_KEY \ --neutron-network $net_id \ -f shell -c id) else eval $(openstack dataprocessing cluster create \ --name $CLUSTER_NAME \ --cluster-template $CLUSTER_TEMPLATE_NAME \ --image $image_id \ --user-keypair $SAHARA_KEY \ -f shell -c id) fi resource_save sahara cluster_id $id } function wait_active_state { # wait until cluster moves to active state local timeleft=1000 while [[ $timeleft -gt 0 ]]; do eval $(openstack dataprocessing cluster show -f shell \ -c Status $CLUSTER_NAME) if [[ "$status" != "Active" ]]; then if [[ "$status" == "Error" ]]; then die $LINENO "Cluster is in Error state" fi echo "Cluster is still not in Active state" sleep 10 timeleft=$((timeleft - 10)) if [[ $timeleft == 0 ]]; then die $LINENO "Cluster hasn't moved to Active state \ during 1000 seconds" fi else break fi done } function check_active { # check that cluster is in Active state eval $(openstack dataprocessing cluster show -f shell \ -c Status $CLUSTER_NAME) if [[ "$status" != "Active" ]]; then die $LINENO "Cluster is not in Active state anymore" fi echo "Sahara verification: SUCCESS" } function create { create_tenant create_user create_flavor register_image sahara_set_user create_keypair create_node_group_template create_cluster_template create_cluster wait_active_state } function verify { : } function verify_noapi { : } function destroy { sahara_set_user set +o errexit # delete cluster check_active openstack dataprocessing cluster delete $CLUSTER_NAME --wait set -o errexit # delete cluster template openstack dataprocessing cluster template delete $CLUSTER_TEMPLATE_NAME # delete node group template openstack dataprocessing node group template delete $NG_TEMPLATE_NAME source_quiet $TOP_DIR/openrc admin admin # unregister image local image_id=$(resource_get sahara image_id) openstack dataprocessing image unregister $image_id # delete flavor openstack flavor delete $SAHARA_FLAVOR_NAME # delete user and project local user_id=$(resource_get sahara user_id) local project_id=$(resource_get sahara project_id) openstack user delete $user_id openstack project delete $project_id } # Dispatcher case $1 in "create") create ;; "verify_noapi") verify_noapi ;; "verify") verify ;; "destroy") destroy ;; "force_destroy") set +o errexit destroy ;; esac ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/devstack/upgrade/settings0000664000175000017500000000227300000000000020102 0ustar00zuulzuul00000000000000#!/bin/bash register_project_for_upgrade sahara register_db_to_save sahara devstack_localrc base IMAGE_URLS=\ "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-uec.tar.gz,\ http://tarballs.openstack.org/heat-test-image/fedora-heat-test-image.qcow2" devstack_localrc base enable_plugin sahara \ https://opendev.org/openstack/sahara \ stable/train devstack_localrc base enable_plugin heat \ https://opendev.org/openstack/heat \ stable/train devstack_localrc base DEFAULT_IMAGE_NAME="cirros-0.3.5-x86_64-uec" devstack_localrc target IMAGE_URLS=\ "http://download.cirros-cloud.net/0.3.5/cirros-0.3.5-x86_64-uec.tar.gz,\ http://tarballs.openstack.org/heat-test-image/fedora-heat-test-image.qcow2" devstack_localrc target 
enable_plugin sahara \ https://opendev.org/openstack/sahara devstack_localrc target enable_plugin heat \ https://opendev.org/openstack/heat devstack_localrc target LIBS_FROM_GIT=python-saharaclient devstack_localrc target DEFAULT_IMAGE_NAME="cirros-0.3.5-x86_64-uec" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/devstack/upgrade/shutdown.sh0000775000175000017500000000075700000000000020536 0ustar00zuulzuul00000000000000#!/bin/bash # ``shutdown-sahara`` set -o errexit . $GRENADE_DIR/grenaderc . $GRENADE_DIR/functions # We need base DevStack functions for this . $BASE_DEVSTACK_DIR/functions . $BASE_DEVSTACK_DIR/stackrc # needed for status directory . $BASE_DEVSTACK_DIR/lib/tls . $BASE_DEVSTACK_DIR/lib/apache . ${GITDIR[sahara]}/devstack/plugin.sh set -o xtrace export ENABLED_SERVICES+=,sahara-api,sahara-eng, stop_sahara # sanity check that service is actually down ensure_services_stopped sahara-eng ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/devstack/upgrade/upgrade.sh0000775000175000017500000000407500000000000020307 0ustar00zuulzuul00000000000000#!/usr/bin/env bash # ``upgrade-sahara`` echo "*********************************************************************" echo "Begin $0" echo "*********************************************************************" # Clean up any resources that may be in use cleanup() { set +o errexit echo "********************************************************************" echo "ERROR: Abort $0" echo "********************************************************************" # Kill ourselves to signal any calling process trap 2; kill -2 $$ } trap cleanup SIGHUP SIGINT SIGTERM # Keep track of the grenade directory RUN_DIR=$(cd $(dirname "$0") && pwd) # Source params . $GRENADE_DIR/grenaderc # Import common functions . $GRENADE_DIR/functions # This script exits on an error so that errors don't compound and you see # only the first error that occurred. set -o errexit # Upgrade Sahara # ============ # Get functions from current DevStack . $TARGET_DEVSTACK_DIR/stackrc . $TARGET_DEVSTACK_DIR/lib/apache . $TARGET_DEVSTACK_DIR/lib/tls . $(dirname $(dirname $BASH_SOURCE))/plugin.sh . $(dirname $(dirname $BASH_SOURCE))/settings # Print the commands being run so that we can see the command that triggers # an error. It is also useful for following allowing as the install occurs. 
set -o xtrace # Save current config files for posterity [[ -d $SAVE_DIR/etc.sahara ]] || cp -pr $SAHARA_CONF_DIR $SAVE_DIR/etc.sahara # install_sahara() stack_install_service sahara install_python_saharaclient # calls upgrade-sahara for specific release upgrade_project sahara $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH # Migrate the database $SAHARA_BIN_DIR/sahara-db-manage --config-file $SAHARA_CONF_FILE \ upgrade head || die $LINENO "DB sync error" # Start Sahara start_sahara # Don't succeed unless the service come up ensure_services_started sahara-eng set +o xtrace echo "*********************************************************************" echo "SUCCESS: End $0" echo "*********************************************************************" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.641891 sahara-16.0.0/doc/0000775000175000017500000000000000000000000013625 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/requirements.txt0000664000175000017500000000064000000000000017111 0ustar00zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. openstackdocstheme>=1.31.2 # Apache-2.0 os-api-ref>=1.6.0 # Apache-2.0 reno>=2.5.0 # Apache-2.0 sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2 # BSD sphinxcontrib-httpdomain>=1.3.0 # BSD whereto>=0.3.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.645891 sahara-16.0.0/doc/source/0000775000175000017500000000000000000000000015125 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.645891 sahara-16.0.0/doc/source/_extra/0000775000175000017500000000000000000000000016407 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/_extra/.htaccess0000664000175000017500000000152600000000000020211 0ustar00zuulzuul00000000000000# renamed after the switch to Storyboard redirectmatch 301 ^/sahara/([^/]+)/contributor/launchpad.html$ /sahara/$1/contributor/project.html # renamed after some documentation reshuffling redirectmatch 301 ^/sahara/(?!ocata|pike|queens)([^/]+)/user/vanilla-imagebuilder.html$ /sahara/$1/user/vanilla-plugin.html redirectmatch 301 ^/sahara/(?!ocata|pike|queens)([^/]+)/user/cdh-imagebuilder.html$ /sahara/$1/user/cdh-plugin.html redirectmatch 301 ^/sahara/(?!ocata|pike|queens)([^/]+)/user/guest-requirements.html$ /sahara/$1/user/building-guest-images.html redirectmatch 301 ^/sahara/([^/]+)/user/([^-]+)-plugin.html$ /sahara-plugin-$2/$1/ redirectmatch 301 ^/sahara/([^/]+)/contributor/how-to-participate.html$ /sahara/$1/contributor/contributing.html redirectmatch 301 ^/sahara/([^/]+)/contributor/project.html$ /sahara/$1/contributor/contributing.html ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.645891 sahara-16.0.0/doc/source/_templates/0000775000175000017500000000000000000000000017262 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 
sahara-16.0.0/doc/source/_templates/sidebarlinks.html0000664000175000017500000000051200000000000022620 0ustar00zuulzuul00000000000000

Useful Links

{% if READTHEDOCS %} {% endif %} ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.645891 sahara-16.0.0/doc/source/_theme_rtd/0000775000175000017500000000000000000000000017237 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/_theme_rtd/layout.html0000664000175000017500000000020500000000000021437 0ustar00zuulzuul00000000000000{% extends "basic/layout.html" %} {% set css_files = css_files + ['_static/tweaks.css'] %} {% block relbar1 %}{% endblock relbar1 %}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/_theme_rtd/theme.conf0000664000175000017500000000010700000000000021206 0ustar00zuulzuul00000000000000[theme] inherit = nature stylesheet = nature.css pygments_style = tango././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.645891 sahara-16.0.0/doc/source/admin/0000775000175000017500000000000000000000000016215 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/admin/advanced-configuration-guide.rst0000664000175000017500000006267200000000000024471 0ustar00zuulzuul00000000000000Sahara Advanced Configuration Guide =================================== This guide addresses specific aspects of Sahara configuration that pertain to advanced usage. It is divided into sections about various features that can be utilized, and their related configurations. .. _custom_network_topologies: Custom network topologies ------------------------- Sahara accesses instances at several stages of cluster spawning through SSH and HTTP. Floating IPs and network namespaces will be automatically used for access when present. When floating IPs are not assigned to instances and namespaces are not being used, sahara will need an alternative method to reach them. The ``proxy_command`` parameter of the configuration file can be used to give sahara a command to access instances. This command is run on the sahara host and must open a netcat socket to the instance destination port. The ``{host}`` and ``{port}`` keywords should be used to describe the destination, they will be substituted at runtime. Other keywords that can be used are: ``{tenant_id}``, ``{network_id}`` and ``{router_id}``. Additionally, if ``proxy_command_use_internal_ip`` is set to ``True``, then the internal IP will be substituted for ``{host}`` in the command. Otherwise (if ``False``, by default) the management IP will be used: this corresponds to floating IP if present in the relevant node group, else the internal IP. The option is ignored if ``proxy_command`` is not also set. For example, the following parameter in the sahara configuration file would be used if instances are accessed through a relay machine: .. code-block:: [DEFAULT] proxy_command='ssh relay-machine-{tenant_id} nc {host} {port}' Whereas the following shows an example of accessing instances though a custom network namespace: .. code-block:: [DEFAULT] proxy_command='ip netns exec ns_for_{network_id} nc {host} {port}' .. _dns_hostname_resolution: DNS Hostname Resolution ----------------------- Sahara can resolve hostnames of cluster instances by using DNS. For this Sahara uses Designate. 
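The zones used for hostname resolution must exist in Designate before clusters are created, as described below. As a hedged sketch only (the zone names, e-mail address and reverse zone are illustrative placeholders, not values required by sahara), they could be created with the OpenStack CLI:

.. code-block::

   # forward lookup zone for instance hostnames (example name)
   openstack zone create --email admin@example.org example.org.

   # reverse lookup zone, used by plugins that resolve hostnames by IP
   openstack zone create --email admin@example.org 2.0.192.in-addr.arpa.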
With this feature, for each instance of the cluster Sahara will create two ``A`` records (for the internal and external IPs) under one hostname and one ``PTR`` record. All links in the Sahara dashboard will also be displayed as hostnames instead of just IP addresses.

You should configure the DNS server with Designate. The Designate service should be properly installed and registered in the Keystone catalog. Detailed instructions about configuring Designate can be found here: :designate-doc:`Designate manual installation ` and here: :neutron-doc:`Configuring OpenStack Networking with Designate `. If you use devstack, you can simply enable the :designate-doc:`Designate devstack plugin `.

Once Designate is configured, you should create the domain(s) used for hostname resolution. This can be done by using the Designate dashboard or by CLI. You also have to create an ``in-addr.arpa.`` domain for reverse hostname resolution, because some plugins (e.g. ``HDP``) determine the hostname by IP. Sahara itself should also be properly configured. In ``sahara.conf`` you must specify two config properties:

.. code-block::

   [DEFAULT]
   # Use Designate for internal and external hostnames resolution:
   use_designate=true
   # IP addresses of Designate nameservers:
   nameservers=1.1.1.1,2.2.2.2

An OpenStack operator should properly configure the network. It must enable DHCP and specify the DNS server IP addresses (e.g. 1.1.1.1 and 2.2.2.2) in the ``DNS Name Servers`` field in the ``Subnet Details``. If the subnet already exists and changing it or creating a new one is impossible, then Sahara will manually change the ``/etc/resolv.conf`` file on every instance of the cluster (provided the ``nameservers`` list has been specified in ``sahara.conf``). In this case, though, Sahara cannot guarantee that these changes will not be overwritten by DHCP or other services of the existing network. Sahara has a health check to track this situation (if it occurs, the health status will be red).

In order to resolve hostnames from your local machine, you should add the appropriate DNS server IP addresses (e.g. 1.1.1.1 and 2.2.2.2) to your ``/etc/resolv.conf`` file. The VMs running the DNS servers must also be reachable from your local machine.

.. _data_locality_configuration:

Data-locality configuration
---------------------------

Hadoop provides the data-locality feature, which gives task tracker and data nodes the ability to be spawned on the same rack, Compute node, or virtual machine. Sahara exposes this functionality to the user through a few configuration parameters and user-defined topology files.

To enable data-locality, set the ``enable_data_locality`` parameter to ``true`` in the sahara configuration file:

.. code-block::

   [DEFAULT]
   enable_data_locality=true

With data locality enabled, you must now specify the topology files for the Compute and Object Storage services. These files are specified in the sahara configuration file as follows:

.. code-block::

   [DEFAULT]
   compute_topology_file=/etc/sahara/compute.topology
   swift_topology_file=/etc/sahara/swift.topology

The ``compute_topology_file`` should contain mappings between Compute nodes and racks in the following format:

.. code-block::

   compute1 /rack1
   compute2 /rack2
   compute3 /rack2

Note that the Compute node names must be exactly the same as configured in OpenStack (the ``host`` column in the admin list of instances).

The ``swift_topology_file`` should contain mappings between Object Storage nodes and racks in the following format: ..
code-block:: node1 /rack1 node2 /rack2 node3 /rack2 Note that the Object Storage node names must be exactly the same as configured in the object ring. Also, you should ensure that instances with the task tracker process have direct access to the Object Storage nodes. Hadoop versions after 1.2.0 support four-layer topology (for more detail please see `HADOOP-8468 JIRA issue`_). To enable this feature set the ``enable_hypervisor_awareness`` parameter to ``true`` in the configuration file. In this case sahara will add the Compute node ID as a second level of topology for virtual machines. .. _HADOOP-8468 JIRA issue: https://issues.apache.org/jira/browse/HADOOP-8468 .. _distributed-mode-configuration: Distributed mode configuration ------------------------------ Sahara can be configured to run in a distributed mode that creates a separation between the API and engine processes. This allows the API process to remain relatively free to handle requests while offloading intensive tasks to the engine processes. The ``sahara-api`` application works as a front-end and serves user requests. It offloads 'heavy' tasks to the ``sahara-engine`` process via RPC mechanisms. While the ``sahara-engine`` process could be loaded with tasks, ``sahara-api`` stays free and hence may quickly respond to user queries. If sahara runs on several hosts, the API requests could be balanced between several ``sahara-api`` hosts using a load balancer. It is not required to balance load between different ``sahara-engine`` hosts as this will be automatically done via the message broker. If a single host becomes unavailable, other hosts will continue serving user requests. Hence, a better scalability is achieved and some fault tolerance as well. Note that distributed mode is not a true high availability. While the failure of a single host does not affect the work of the others, all of the operations running on the failed host will stop. For example, if a cluster scaling is interrupted, the cluster will be stuck in a half-scaled state. The cluster might continue working, but it will be impossible to scale it further or run jobs on it via EDP. To run sahara in distributed mode pick several hosts on which you want to run sahara services and follow these steps: * On each host install and configure sahara using the `installation guide <../install/installation-guide.html>`_ except: * Do not run ``sahara-db-manage`` or launch sahara with ``sahara-all`` * Ensure that each configuration file provides a database connection string to a single database for all hosts. * Run ``sahara-db-manage`` as described in the installation guide, but only on a single (arbitrarily picked) host. * The ``sahara-api`` and ``sahara-engine`` processes use oslo.messaging to communicate with each other. You will need to configure it properly on each host (see below). * Run ``sahara-api`` and ``sahara-engine`` on the desired hosts. You may run both processes on the same or separate hosts as long as they are configured to use the same message broker and database. To configure ``oslo.messaging``, first you need to choose a message broker driver. The recommended driver is ``RabbitMQ``. For the ``RabbitMQ`` drivers please see the :ref:`notification-configuration` documentation for an explanation of common configuration options; the entire list of configuration options is found in the :oslo.messaging-doc:`oslo_messaging_rabbit documentation `. These options will also be present in the generated sample configuration file. 
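As a minimal sketch (host names, credentials and the database URL below are placeholders, and the exact option set depends on your deployment), every ``sahara-api`` and ``sahara-engine`` host in a distributed deployment would point at the same message broker and the same database, for example:

.. code-block::

   [DEFAULT]
   # every sahara-api and sahara-engine host must use the same broker
   transport_url = rabbit://sahara:RABBIT_PASS@broker.example.org:5672/

   [database]
   # every host must point at the same database
   connection = mysql+pymysql://sahara:DB_PASS@db.example.org/sahara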
For instructions on creating the configuration file please see the :doc:`configuration-guide`. .. _distributed-periodic-tasks: Distributed periodic tasks configuration ---------------------------------------- If sahara is configured to run in distributed mode (see :ref:`distributed-mode-configuration`), periodic tasks can also be launched in distributed mode. In this case tasks will be split across all ``sahara-engine`` processes. This will reduce overall load. Distributed periodic tasks are based on Hash Ring implementation and the Tooz library that provides group membership support for a set of backends. In order to use periodic tasks distribution, the following steps are required: * One of the :tooz-doc:`supported backends ` should be configured and started. * Backend URL should be set in the sahara configuration file with the ``periodic_coordinator_backend_url`` parameter. For example, if the ZooKeeper backend is being used: .. code-block:: [DEFAULT] periodic_coordinator_backend_url=kazoo://IP:PORT * Tooz extras should be installed. When using Zookeeper as coordination backend, ``kazoo`` library should be installed. It can be done with pip: .. code-block:: pip install tooz[zookeeper] * Periodic tasks can be performed in parallel. Number of threads to run periodic tasks on a single engine can be set with ``periodic_workers_number`` parameter (only 1 thread will be launched by default). Example: .. code-block:: [DEFAULT] periodic_workers_number=2 * ``coordinator_heartbeat_interval`` can be set to change the interval between heartbeat execution (1 second by default). Heartbeats are needed to make sure that connection to the coordination backend is active. Example: .. code-block:: [DEFAULT] coordinator_heartbeat_interval=2 * ``hash_ring_replicas_count`` can be set to change the number of replicas for each engine on a Hash Ring. Each replica is a point on a Hash Ring that belongs to a particular engine. A larger number of replicas leads to better task distribution across the set of engines. (40 by default). Example: .. code-block:: [DEFAULT] hash_ring_replicas_count=100 .. _external_key_manager_usage: External key manager usage -------------------------- Sahara generates and stores several passwords during the course of operation. To harden sahara's usage of passwords it can be instructed to use an external key manager for storage and retrieval of these secrets. To enable this feature there must first be an OpenStack Key Manager service deployed within the stack. With a Key Manager service deployed on the stack, sahara must be configured to enable the external storage of secrets. Sahara uses the :castellan-doc:`castellan <>` library to interface with the OpenStack Key Manager service. This library provides configurable access to a key manager. To configure sahara to use barbican as the key manager, edit the sahara configuration file as follows: .. code-block:: [DEFAULT] use_barbican_key_manager=true Enabling the ``use_barbican_key_manager`` option will configure castellan to use barbican as its key management implementation. By default it will attempt to find barbican in the Identity service's service catalog. For added control of the barbican server location, optional configuration values may be added to specify the URL for the barbican API server. .. code-block:: [castellan] barbican_api_endpoint=http://{barbican controller IP:PORT}/ barbican_api_version=v1 The specific values for the barbican endpoint will be dictated by the IP address of the controller for your installation. 
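Before enabling this option it can be useful to confirm that the Key Manager service is registered in the Identity service catalog, since castellan looks it up there by default. A hedged example check (the service is normally registered with the ``key-manager`` type; output varies by deployment):

.. code-block::

   openstack service list
   openstack endpoint list --service key-manager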
With all of these values configured and the Key Manager service deployed, sahara will begin storing its secrets in the external manager.

Indirect instance access through proxy nodes
--------------------------------------------

.. warning::
    The indirect VM access feature is in the alpha state. We do not
    recommend using it in a production environment.

Sahara needs to access instances through SSH during cluster setup. This access can be obtained in a number of different ways (see :ref:`floating_ip_management`, :ref:`custom_network_topologies`). Sometimes it is impossible to provide access to all nodes (because of a limited number of floating IPs or because of security policies). In these cases access can be gained using other nodes of the cluster as proxy gateways. To enable this, set ``is_proxy_gateway=true`` for the node group you want to use as a proxy. Sahara will communicate with all other cluster instances through the instances of this node group.

Note that if ``use_floating_ips=true`` and the cluster contains a node group with ``is_proxy_gateway=true``, the requirement to have ``floating_ip_pool`` specified applies only to the proxy node group. Other instances will be accessed through proxy instances using the standard private network.

Note that the Cloudera Hadoop plugin doesn't support access to the Cloudera manager through a proxy node. This means that for CDH clusters only nodes with the Cloudera manager can be designated as proxy gateway nodes.

Multi region deployment
-----------------------

Sahara supports multi-region deployment. To enable this option, each instance of sahara should have the ``os_region_name=`` parameter set in the configuration file. The following example demonstrates configuring sahara to use the ``RegionOne`` region:

.. code-block::

   [DEFAULT]
   os_region_name=RegionOne

.. _non-root-users:

Non-root users
--------------

In cases where a proxy command is being used to access cluster instances (for example, when using namespaces or when specifying a custom proxy command), rootwrap functionality is provided to allow users other than ``root`` access to the needed operating system facilities. To use rootwrap, the following configuration parameter must be set:

.. code-block::

   [DEFAULT]
   use_rootwrap=true

Assuming you elect to leverage the default rootwrap command (``sahara-rootwrap``), you will need to perform the following additional setup steps:

* Copy the provided sudoers configuration file from the local project file ``etc/sudoers.d/sahara-rootwrap`` to the system specific location, usually ``/etc/sudoers.d``. This file is set up to allow a user named ``sahara`` access to the rootwrap script. It contains the following:

  .. code-block::

     sahara ALL = (root) NOPASSWD: /usr/bin/sahara-rootwrap /etc/sahara/rootwrap.conf

* When using devstack to deploy sahara, note that you need to change the user in the script from ``sahara`` to ``stack``.

* Copy the provided rootwrap configuration file from the local project file ``etc/sahara/rootwrap.conf`` to the system specific location, usually ``/etc/sahara``. This file contains the default configuration for rootwrap.

* Copy the provided rootwrap filters file from the local project file ``etc/sahara/rootwrap.d/sahara.filters`` to the location specified in the rootwrap configuration file, usually ``/etc/sahara/rootwrap.d``. This file contains the filters that will allow the ``sahara`` user to access the ``ip netns exec``, ``nc``, and ``kill`` commands through the rootwrap (depending on ``proxy_command`` you may need to set additional filters).
It should look similar to the followings: .. code-block:: [Filters] ip: IpNetnsExecFilter, ip, root nc: CommandFilter, nc, root kill: CommandFilter, kill, root If you wish to use a rootwrap command other than ``sahara-rootwrap`` you can set the following parameter in your sahara configuration file: .. code-block:: [DEFAULT] rootwrap_command='sudo sahara-rootwrap /etc/sahara/rootwrap.conf' For more information on rootwrap please refer to the `official Rootwrap documentation `_ Object Storage access using proxy users --------------------------------------- To improve security for clusters accessing files in Object Storage, sahara can be configured to use proxy users and delegated trusts for access. This behavior has been implemented to reduce the need for storing and distributing user credentials. The use of proxy users involves creating an Identity domain that will be designated as the home for these users. Proxy users will be created on demand by sahara and will only exist during a job execution which requires Object Storage access. The domain created for the proxy users must be backed by a driver that allows sahara's admin user to create new user accounts. This new domain should contain no roles, to limit the potential access of a proxy user. Once the domain has been created, sahara must be configured to use it by adding the domain name and any potential delegated roles that must be used for Object Storage access to the sahara configuration file. With the domain enabled in sahara, users will no longer be required to enter credentials for their data sources and job binaries referenced in Object Storage. Detailed instructions ^^^^^^^^^^^^^^^^^^^^^ First a domain must be created in the Identity service to hold proxy users created by sahara. This domain must have an identity backend driver that allows for sahara to create new users. The default SQL engine is sufficient but if your keystone identity is backed by LDAP or similar then domain specific configurations should be used to ensure sahara's access. Please see the :keystone-doc:`Keystone documentation ` for more information. With the domain created, sahara's configuration file should be updated to include the new domain name and any potential roles that will be needed. For this example let's assume that the name of the proxy domain is ``sahara_proxy`` and the roles needed by proxy users will be ``member`` and ``SwiftUser``. .. code-block:: [DEFAULT] use_domain_for_proxy_users=true proxy_user_domain_name=sahara_proxy proxy_user_role_names=member,SwiftUser A note on the use of roles. In the context of the proxy user, any roles specified here are roles intended to be delegated to the proxy user from the user with access to Object Storage. More specifically, any roles that are required for Object Storage access by the project owning the object store must be delegated to the proxy user for authentication to be successful. Finally, the stack administrator must ensure that images registered with sahara have the latest version of the Hadoop swift filesystem plugin installed. The sources for this plugin can be found in the `sahara extra repository`_. For more information on images or swift integration see the sahara documentation sections :ref:`building-guest-images-label` and :ref:`swift-integration-label`. .. _Sahara extra repository: https://opendev.org/openstack/sahara-extra .. 
_volume_instance_locality_configuration: Volume instance locality configuration -------------------------------------- The Block Storage service provides the ability to define volume instance locality to ensure that instance volumes are created on the same host as the hypervisor. The ``InstanceLocalityFilter`` provides the mechanism for the selection of a storage provider located on the same physical host as an instance. To enable this functionality for instances of a specific node group, the ``volume_local_to_instance`` field in the node group template should be set to ``true`` and some extra configurations are needed: * The cinder-volume service should be launched on every physical host and at least one physical host should run both cinder-scheduler and cinder-volume services. * ``InstanceLocalityFilter`` should be added to the list of default filters (``scheduler_default_filters`` in cinder) for the Block Storage configuration. * The Extended Server Attributes extension needs to be active in the Compute service (this is true by default in nova), so that the ``OS-EXT-SRV-ATTR:host`` property is returned when requesting instance info. * The user making the call needs to have sufficient rights for the property to be returned by the Compute service. This can be done by: * by changing nova's ``policy.yaml`` to allow the user access to the ``extended_server_attributes`` option. * by designating an account with privileged rights in the cinder configuration: .. code-block:: os_privileged_user_name = os_privileged_user_password = os_privileged_user_tenant = It should be noted that in a situation when the host has no space for volume creation, the created volume will have an ``Error`` state and can not be used. Autoconfiguration for templates ------------------------------- :doc:`configs-recommendations` NTP service configuration ------------------------- By default sahara will enable the NTP service on all cluster instances if the NTP package is included in the image (the sahara disk image builder will include NTP in all images it generates). The default NTP server will be ``pool.ntp.org``; this can be overridden using the ``default_ntp_server`` setting in the ``DEFAULT`` section of the sahara configuration file. If you are creating cluster templates using the sahara UI and would like to specify a different NTP server for a particular cluster template, use the ``URL of NTP server`` setting in the ``General Parameters`` section when you create the template. If you would like to disable NTP for a particular cluster template, deselect the ``Enable NTP service`` checkbox in the ``General Parameters`` section when you create the template. If you are creating clusters using the sahara CLI, you can specify another NTP server or disable NTP service using the examples below. If you want to enable configuring the NTP service, you should specify the following configs for the cluster: .. code-block:: { "cluster_configs": { "general": { "URL of NTP server": "your_server.net" } } } If you want to disable configuring NTP service, you should specify following configs for the cluster: .. code-block:: { "cluster_configs": { "general": { "Enable NTP service": false } } } CORS (Cross Origin Resource Sharing) Configuration -------------------------------------------------- Sahara provides direct API access to user-agents (browsers) via the HTTP CORS protocol. Detailed documentation, as well as troubleshooting examples, may be found in the :oslo.middleware-doc:`documentation of the oslo.db cross-project features `. 
To get started quickly, use the example configuration block below, replacing the :code:`allowed origin` field with the host(s) from which your API expects access. .. code-block:: [cors] allowed_origin=https://we.example.com:443 max_age=3600 allow_credentials=true [cors.additional_domain_1] allowed_origin=https://additional_domain_1.example.com:443 [cors.additional_domain_2] allowed_origin=https://additional_domain_2.example.com:443 For more information on Cross Origin Resource Sharing, please review the `W3C CORS specification`_. .. _W3C CORS specification: http://www.w3.org/TR/cors/ Cleanup time for incomplete clusters ------------------------------------ Sahara provides maximal time (in hours) for clusters allowed to be in states other than "Active", "Deleting" or "Error". If a cluster is not in "Active", "Deleting" or "Error" state and last update of it was longer than ``cleanup_time_for_incomplete_clusters`` hours ago then it will be deleted automatically. You can enable this feature by adding appropriate config property in the ``DEFAULT`` section (by default it set up to ``0`` value which means that automatic clean up is disabled). For example, if you want cluster to be deleted after 3 hours if it didn't leave "Starting" state then you should specify: .. code-block:: [DEFAULT] cleanup_time_for_incomplete_clusters = 3 Security Group Rules Configuration ---------------------------------- When auto_security_group is used, the amount of created security group rules may be bigger than the default values configured in ``neutron.conf``. Then the default limit should be raised up to some bigger value which is proportional to the number of cluster node groups. You can change it in ``neutron.conf`` file: .. code-block:: [quotas] quota_security_group = 1000 quota_security_group_rule = 10000 Or you can execute openstack CLI command: .. code-block:: openstack quota set --secgroups 1000 --secgroup-rules 10000 $PROJECT_ID ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/admin/configs-recommendations.rst0000664000175000017500000000360200000000000023565 0ustar00zuulzuul00000000000000:orphan: Autoconfiguring templates ========================= During the Liberty development cycle sahara implemented a tool that recommends and applies configuration values for cluster templates and node group templates. These recommendations are based on the number of specific instances and on flavors of the cluster node groups. Currently the following plugins support this feature: * CDH; * Ambari; * Spark; * the Vanilla Apache Hadoop plugin. By default this feature is enabled for all cluster templates and node group templates. If you want to disable this feature for a particular cluster or node group template you should set the ``use_autoconfig`` field to ``false``. .. NOTE Also, if you manually set configs from the list below, the recommended configs will not be applied. The following describes the settings for which sahara can recommend autoconfiguration: The Cloudera, Spark and Vanilla Apache Hadoop plugin support configuring ``dfs.replication`` (``dfs_replication`` for Cloudera plugin) which is calculated as a minimum from the amount of ``datanode`` (``HDFS_DATANODE`` for Cloudera plugin) instances in the cluster and the default value for ``dfs.replication``. The Vanilla Apache Hadoop plugin and Cloudera plugin support autoconfiguration of basic YARN and MapReduce configs. 
These autoconfigurations are based on the following documentation: http://docs.hortonworks.com/HDPDocuments/HDP2/HDP-2.0.9.1/bk_installing_manually_book/content/rpm-chap1-11.html The Ambari plugin has its own strategies on configuration recommendations. You can choose one of ``ALWAYS_APPLY``, ``NEVER_APPLY``, and ``ONLY_STACK_DEFAULTS_APPLY``. By default the Ambari plugin follows the ``NEVER_APPLY`` strategy. You can get more information about strategies in Ambari's official documentation: https://cwiki.apache.org/confluence/display/AMBARI/Blueprints#Blueprints-ClusterCreationTemplateStructure ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/admin/configuration-guide.rst0000664000175000017500000001600500000000000022713 0ustar00zuulzuul00000000000000Sahara Configuration Guide ========================== This guide covers the steps for a basic configuration of sahara. It will help you to configure the service in the most simple manner. Basic configuration ------------------- A full configuration file showing all possible configuration options and their defaults can be generated with the following command: .. sourcecode:: cfg $ tox -e genconfig Running this command will create a file named ``sahara.conf.sample`` in the ``etc/sahara`` directory of the project. After creating a configuration file by either generating one or starting with an empty file, edit the ``connection`` parameter in the ``[database]`` section. The URL provided here should point to an empty database. For example, the connection string for a MySQL database will be: .. sourcecode:: cfg connection=mysql+pymsql://username:password@host:port/database Next you will configure the Identity service parameters in the ``[keystone_authtoken]`` section. The ``www_authenticate_uri`` parameter should point to the public Identity API endpoint. The ``auth_url`` should point to the internal Identity API endpoint. For example: .. sourcecode:: cfg www_authenticate_uri=http://127.0.0.1:5000/v3/ auth_url=http://127.0.0.1:5000/v3/ Specify the ``username``, ``user_domain_name``, ``password``, ``project_name``. and ``project_domain_name``. These parameters must specify an Identity user who has the ``admin`` role in the given project. These credentials allow sahara to authenticate and authorize its users. Next you will configure the default Networking service. If using neutron for networking the following parameter should be set in the ``[DEFAULT]`` section: With these parameters set, sahara is ready to run. By default the sahara's log level is set to INFO. If you wish to increase the logging levels for troubleshooting, set ``debug`` to ``true`` in the ``[DEFAULT]`` section of the configuration file. Networking configuration ------------------------ By default sahara is configured to use the neutron. Additionally, if the cluster supports network namespaces the ``use_namespaces`` property can be used to enable their usage. .. sourcecode:: cfg [DEFAULT] use_namespaces=True .. note:: If a user other than ``root`` will be running the Sahara server instance and namespaces are used, some additional configuration is required, please see :ref:`non-root-users` for more information. .. _floating_ip_management: Floating IP management ++++++++++++++++++++++ During cluster setup sahara must access instances through a secure shell (SSH). To establish this connection it may use either the fixed or floating IP address of an instance. 
By default sahara is configured to use floating IP addresses for access. This is controlled by the ``use_floating_ips`` configuration parameter. With this setup the user has two options for ensuring that the instances in the node groups templates that requires floating IPs gain a floating IP address: * The user may specify a floating IP address pool for each node group that requires floating IPs directly. From Newton changes were made to allow the coexistence of clusters using floating IPs and clusters using fixed IPs. If ``use_floating_ips`` is True it means that the floating IPs can be used by Sahara to spawn clusters. But, differently from previous versions, this does not mean that all instances in the cluster must have floating IPs and that all clusters must use floating IPs. It is possible in a single Sahara deploy to have clusters setup using fixed IPs, clusters using floating IPs and cluster that use both. If not using floating IP addresses (``use_floating_ips=False``) sahara will use fixed IP addresses for instance management. When using neutron for the Networking service the user will be able to choose the fixed IP network for all instances in a cluster. .. _notification-configuration: Notifications configuration --------------------------- Sahara can be configured to send notifications to the OpenStack Telemetry module. To enable this functionality the following parameter ``enable`` should be set in the ``[oslo_messaging_notifications]`` section of the configuration file: .. sourcecode:: cfg [oslo_messaging_notifications] enable = true And the following parameter ``driver`` should be set in the ``[oslo_messaging_notifications]`` section of the configuration file: .. sourcecode:: cfg [oslo_messaging_notifications] driver = messaging By default sahara is configured to use RabbitMQ as its message broker. If you are using RabbitMQ as the message broker, then you should set the following parameter in the ``[DEFAULT]`` section: .. sourcecode:: cfg rpc_backend = rabbit You may also need to specify the connection parameters for your RabbitMQ installation. The following example shows the default values in the ``[oslo_messaging_rabbit]`` section which may need adjustment: .. sourcecode:: cfg rabbit_host=localhost rabbit_port=5672 rabbit_hosts=$rabbit_host:$rabbit_port rabbit_userid=guest rabbit_password=guest rabbit_virtual_host=/ .. .. _orchestration-configuration: Orchestration configuration --------------------------- By default sahara is configured to use the heat engine for instance creation. The heat engine uses the OpenStack Orchestration service to provision instances. This engine makes calls directly to the services required for instance provisioning. .. _policy-configuration-label: Policy configuration -------------------- .. warning:: JSON formatted policy file is deprecated since Sahara 15.0.0 (Xena). This `oslopolicy-convert-json-to-yaml`__ tool will migrate your existing JSON-formatted policy file to YAML in a backward-compatible way. .. __: https://docs.openstack.org/oslo.policy/victoria/cli/oslopolicy-convert-json-to-yaml.html Sahara's public API calls may be restricted to certain sets of users by using a policy configuration file. The location of the policy file(s) is controlled by the ``policy_file`` and ``policy_dirs`` parameters in the ``[oslo_policy]`` section. By default sahara will search for a ``policy.yaml`` file in the same directory as the ``sahara.conf`` configuration file. Examples ++++++++ Example 1. Allow all method to all users (default policy). .. 
sourcecode:: json { "default": "" } Example 2. Disallow image registry manipulations to non-admin users. .. sourcecode:: json { "default": "", "data-processing:images:register": "role:admin", "data-processing:images:unregister": "role:admin", "data-processing:images:add_tags": "role:admin", "data-processing:images:remove_tags": "role:admin" } API configuration ----------------- Sahara uses the ``api-paste.ini`` file to configure the data processing API service. For middleware injection sahara uses pastedeploy library. The location of the api-paste file is controlled by the ``api_paste_config`` parameter in the ``[default]`` section. By default sahara will search for a ``api-paste.ini`` file in the same directory as the configuration file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/admin/index.rst0000664000175000017500000000025400000000000020057 0ustar00zuulzuul00000000000000====================== Operator Documentation ====================== .. toctree:: :maxdepth: 2 configuration-guide advanced-configuration-guide upgrade-guide ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/admin/upgrade-guide.rst0000664000175000017500000001425600000000000021501 0ustar00zuulzuul00000000000000Sahara Upgrade Guide ==================== This page contains details about upgrading sahara between releases such as configuration file updates, database migrations, and architectural changes. Icehouse -> Juno ---------------- Main binary renamed to sahara-all +++++++++++++++++++++++++++++++++ The All-In-One sahara binary has been renamed from ``sahara-api`` to ``sahara-all``. The new name should be used in all cases where the All-In-One sahara is desired. Authentication middleware changes +++++++++++++++++++++++++++++++++ The custom auth_token middleware has been deprecated in favor of the keystone middleware. This change requires an update to the sahara configuration file. To update your configuration file you should replace the following parameters from the ``[DEFAULT]`` section with the new parameters in the ``[keystone_authtoken]`` section: +-----------------------+--------------------+ | Old parameter name | New parameter name | +=======================+====================+ | os_admin_username | admin_user | +-----------------------+--------------------+ | os_admin_password | admin_password | +-----------------------+--------------------+ | os_admin_tenant_name | admin_tenant_name | +-----------------------+--------------------+ Additionally, the parameters ``os_auth_protocol``, ``os_auth_host``, and ``os_auth_port`` have been combined to create the ``auth_uri`` and ``identity_uri`` parameters. These new parameters should be full URIs to the keystone public and admin endpoints, respectively. For more information about these configuration parameters please see the :doc:`../admin/configuration-guide`. Database package changes ++++++++++++++++++++++++ The oslo based code from sahara.openstack.common.db has been replaced by the usage of the oslo.db package. This change does not require any update to sahara's configuration file. Additionally, the usage of SQLite databases has been deprecated. Please use MySQL or PostgreSQL databases for sahara. SQLite has been deprecated because it does not, and is not going to, support the ``ALTER COLUMN`` and ``DROP COLUMN`` commands required for migrations between versions. 
For more information please see http://www.sqlite.org/omitted.html Sahara integration into OpenStack Dashboard +++++++++++++++++++++++++++++++++++++++++++ The sahara dashboard package has been deprecated in the Juno release. The functionality of the dashboard has been fully incorporated into the OpenStack Dashboard. The sahara interface is available under the "Project" -> "Data Processing" tab. The Data processing service endpoints must be registered in the Identity service catalog for the Dashboard to properly recognize and display those user interface components. For more details on this process please see :ref:`registering Sahara in installation guide `. The `sahara-dashboard `_ project is now used solely to host sahara user interface integration tests. Virtual machine user name changes +++++++++++++++++++++++++++++++++ The HEAT infrastructure engine has been updated to use the same rules for instance user names as the direct engine. In previous releases the user name for instances created by sahara using HEAT was always 'ec2-user'. As of Juno, the user name is taken from the image registry as described in the :doc:`../user/registering-image` document. This change breaks backward compatibility for clusters created using the HEAT infrastructure engine prior to the Juno release. Clusters will continue to operate, but we do not recommended using the scaling operations with them. Anti affinity implementation changed ++++++++++++++++++++++++++++++++++++ Starting with the Juno release the anti affinity feature is implemented using server groups. From the user perspective there will be no noticeable changes with this feature. Internally this change has introduced the following behavior: 1) Server group objects will be created for any clusters with anti affinity enabled. 2) Affected instances on the same host will not be allowed even if they do not have common processes. Prior to Juno, instances with differing processes were allowed on the same host. The new implementation guarantees that all affected instances will be on different hosts regardless of their processes. The new anti affinity implementation will only be applied for new clusters. Clusters created with previous versions will continue to operate under the older implementation, this applies to scaling operations on these clusters as well. Juno -> Kilo ------------ Sahara requires policy configuration ++++++++++++++++++++++++++++++++++++ Sahara now requires a policy configuration file. The ``policy.json`` file should be placed in the same directory as the sahara configuration file or specified using the ``policy_file`` parameter. For more details about the policy file please see the :ref:`policy section in the configuration guide `. Kilo -> Liberty --------------- Direct engine deprecation +++++++++++++++++++++++++ In the Liberty release the direct infrastructure engine has been deprecated and the heat infrastructure engine is now default. This means, that it is preferable to use heat engine instead now. In the Liberty release you can continue to operate clusters with the direct engine (create, delete, scale). Using heat engine only the delete operation is available on clusters that were created by the direct engine. After the Liberty release the direct engine will be removed, this means that you will only be able to delete clusters created with the direct engine. 
Policy namespace changed (policy.json) ++++++++++++++++++++++++++++++++++++++ The "data-processing:" namespace has been added to the beginning of the all Sahara's policy based actions, so, you need to update the policy.json file by prepending all actions with "data-processing:". Liberty -> Mitaka ----------------- Direct engine is removed. Mitaka -> Newton ---------------- Sahara CLI command is deprecated, please use OpenStack Client. .. note:: Since Mitaka release sahara actively uses release notes so you can see all required upgrade actions here: https://docs.openstack.org/releasenotes/sahara/ ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.645891 sahara-16.0.0/doc/source/cli/0000775000175000017500000000000000000000000015674 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/cli/index.rst0000664000175000017500000000031400000000000017533 0ustar00zuulzuul00000000000000======================== Sahara CLI Documentation ======================== In this section you will find information on Sahara’s command line interface. .. toctree:: :maxdepth: 1 sahara-status ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/cli/sahara-status.rst0000664000175000017500000000363700000000000021217 0ustar00zuulzuul00000000000000============= sahara-status ============= ---------------------------------------- CLI interface for Sahara status commands ---------------------------------------- Synopsis ======== :: sahara-status [] Description =========== :program:`sahara-status` is a tool that provides routines for checking the status of a Sahara deployment. Options ======= The standard pattern for executing a :program:`sahara-status` command is:: sahara-status [] Run without arguments to see a list of available command categories:: sahara-status Categories are: * ``upgrade`` Detailed descriptions are below: You can also run with a category argument such as ``upgrade`` to see a list of all commands in that category:: sahara-status upgrade These sections describe the available categories and arguments for :program:`sahara-status`. Upgrade ~~~~~~~ .. _sahara-status-checks: ``sahara-status upgrade check`` Performs a release-specific readiness check before restarting services with new code. For example, missing or changed configuration options, incompatible object states, or other conditions that could lead to failures while upgrading. **Return Codes** .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - All upgrade readiness checks passed successfully and there is nothing to do. * - 1 - At least one check encountered an issue and requires further investigation. This is considered a warning but the upgrade may be OK. * - 2 - There was an upgrade status check failure that needs to be investigated. This should be considered something that stops an upgrade. * - 255 - An unexpected error occurred. **History of Checks** **10.0.0 (Stein)** * Sample check to be filled in with checks as they are added in Stein. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/conf.py0000664000175000017500000002125500000000000016431 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Copyright (c) 2013 Mirantis Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) sys.path.insert(0, os.path.abspath('../../sahara')) sys.path.append(os.path.abspath('..')) sys.path.append(os.path.abspath('../bin')) # -- General configuration ----------------------------------------------------- on_rtd = os.environ.get('READTHEDOCS', None) == 'True' # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinxcontrib.httpdomain', 'oslo_config.sphinxconfiggen', 'oslo_config.sphinxext', 'openstackdocstheme'] # openstackdocstheme options repository_name = 'openstack/sahara' use_storyboard = True config_generator_config_file = 'config-generator.conf' config_sample_basename = 'sahara' openstack_projects = [ 'barbican', 'castellan', 'designate', 'devstack', 'ironic', 'keystone', 'keystoneauth', 'kolla-ansible', 'neutron', 'nova', 'oslo.messaging', 'oslo.middleware', 'sahara-plugin-ambari', 'sahara-plugin-cdh', 'sahara-plugin-mapr', 'sahara-plugin-spark', 'sahara-plugin-storm', 'sahara-plugin-vanilla', 'tooz' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # Add any paths that contain "extra" files, such as .htaccess or # robots.txt. html_extra_path = ['_extra'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. copyright = '2014, OpenStack Foundation' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. 
#show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. if on_rtd: html_theme_path = ['.'] html_theme = '_theme_rtd' html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = {"show_other_versions": "True",} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". html_title = 'Sahara' # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. html_sidebars = { 'index': ['sidebarlinks.html', 'localtoc.html', 'searchbox.html', 'sourcelink.html'], '**': ['localtoc.html', 'relations.html', 'searchbox.html', 'sourcelink.html'] } # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'SaharaDoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). 
latex_documents = [ ('index', 'saharadoc.tex', 'Sahara', 'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'sahara', 'Sahara', ['OpenStack Foundation'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Sahara', 'Sahara', 'OpenStack Foundation', 'Sahara', 'Sahara', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/config-generator.conf0000664000175000017500000000066600000000000021235 0ustar00zuulzuul00000000000000[DEFAULT] wrap_width = 79 namespace = sahara.config namespace = keystonemiddleware.auth_token namespace = oslo.concurrency namespace = oslo.db namespace = oslo.log namespace = oslo.messaging namespace = oslo.middleware.cors namespace = oslo.middleware.healthcheck namespace = oslo.middleware.http_proxy_to_wsgi namespace = oslo.policy namespace = oslo.service.periodic_task namespace = oslo.service.sslutils namespace = oslo.service.wsgi ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.645891 sahara-16.0.0/doc/source/configuration/0000775000175000017500000000000000000000000017774 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/configuration/descriptionconfig.rst0000664000175000017500000000034700000000000024243 0ustar00zuulzuul00000000000000Configuration options ===================== This section provides a list of the configuration options that can be set in the sahara configuration file. .. show-options:: :config-file: tools/config/config-generator.sahara.conf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/configuration/index.rst0000664000175000017500000000021500000000000021633 0ustar00zuulzuul00000000000000======================= Configuration Reference ======================= .. 
toctree:: :maxdepth: 1 descriptionconfig sampleconfig ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/configuration/sampleconfig.rst0000664000175000017500000000027100000000000023175 0ustar00zuulzuul00000000000000Sample sahara.conf file ======================= This is an automatically generated sample of the sahara.conf file. .. literalinclude:: ../sample.config :language: ini :linenos: ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.649891 sahara-16.0.0/doc/source/contributor/0000775000175000017500000000000000000000000017477 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/contributor/adding-database-migrations.rst0000664000175000017500000001023600000000000025375 0ustar00zuulzuul00000000000000Adding Database Migrations ========================== The migrations in ``sahara/db/migration/alembic_migrations/versions`` contain the changes needed to migrate between Sahara database revisions. A migration occurs by executing a script that details the changes needed to upgrade or downgrade the database. The migration scripts are ordered so that multiple scripts can run sequentially. The scripts are executed by Sahara's migration wrapper which uses the Alembic library to manage the migration. Sahara supports migration from Icehouse or later. Any code modifications that change the structure of the database require a migration script so that previously existing databases will continue to function when the new code is released. This page gives a brief overview of how to add the migration. Generate a New Migration Script +++++++++++++++++++++++++++++++ New migration scripts can be generated using the ``sahara-db-manage`` command. To generate a migration stub to be filled in by the developer:: $ sahara-db-manage --config-file /path/to/sahara.conf revision -m "description of revision" To autogenerate a migration script that reflects the current structure of the database:: $ sahara-db-manage --config-file /path/to/sahara.conf revision -m "description of revision" --autogenerate Each of these commands will create a file of the form ``revision_description`` where ``revision`` is a string generated by Alembic and ``description`` is based on the text passed with the ``-m`` option. Follow the Sahara Naming Convention +++++++++++++++++++++++++++++++++++ By convention Sahara uses 3-digit revision numbers, and this scheme differs from the strings generated by Alembic. Consequently, it's necessary to rename the generated script and modify the revision identifiers in the script. Open the new script and look for the variable ``down_revision``. The value should be a 3-digit numeric string, and it identifies the current revision number of the database. Set the ``revision`` value to the ``down_revision`` value + 1. For example, the lines:: # revision identifiers, used by Alembic. revision = '507eb70202af' down_revision = '006' will become:: # revision identifiers, used by Alembic. revision = '007' down_revision = '006' Modify any comments in the file to match the changes and rename the file to match the new revision number:: $ mv 507eb70202af_my_new_revision.py 007_my_new_revision.py Add Alembic Operations to the Script ++++++++++++++++++++++++++++++++++++ The migration script contains method ``upgrade()``. Sahara has not supported downgrades since the Kilo release. 
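As an illustration of what such a script might eventually contain (a sketch only; the table and column names below are invented for this example and are not part of the sahara schema), a filled-in ``upgrade()`` for the hypothetical revision '007' could look like::

    from alembic import op
    import sqlalchemy as sa


    def upgrade():
        # Example change only: add a nullable column to a hypothetical table.
        op.add_column('example_table',
                      sa.Column('description', sa.String(length=200),
                                nullable=True))
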
Fill in this method with the appropriate Alembic operations to perform upgrades. In the above example, an upgrade will move from revision '006' to revision '007'. Command Summary for sahara-db-manage ++++++++++++++++++++++++++++++++++++ You can upgrade to the latest database version via:: $ sahara-db-manage --config-file /path/to/sahara.conf upgrade head To check the current database version:: $ sahara-db-manage --config-file /path/to/sahara.conf current To create a script to run the migration offline:: $ sahara-db-manage --config-file /path/to/sahara.conf upgrade head --sql To run the offline migration between specific migration versions:: $ sahara-db-manage --config-file /path/to/sahara.conf upgrade : --sql To upgrade the database incrementally:: $ sahara-db-manage --config-file /path/to/sahara.conf upgrade --delta <# of revs> To create a new revision:: $ sahara-db-manage --config-file /path/to/sahara.conf revision -m "description of revision" --autogenerate To create a blank file:: $ sahara-db-manage --config-file /path/to/sahara.conf revision -m "description of revision" This command does not perform any migrations, it only sets the revision. Revision may be any existing revision. Use this command carefully:: $ sahara-db-manage --config-file /path/to/sahara.conf stamp To verify that the timeline does branch, you can run this command:: $ sahara-db-manage --config-file /path/to/sahara.conf check_migration If the migration path does branch, you can find the branch point via:: $ sahara-db-manage --config-file /path/to/sahara.conf history ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/contributor/apiv2.rst0000664000175000017500000001001100000000000021243 0ustar00zuulzuul00000000000000API Version 2 Development ========================= The sahara project is currently in the process of creating a new RESTful application programming interface (API). This interface is by-default enabled, although it remains experimental. This document defines the steps necessary to enable and communicate with the new API. This API has a few fundamental changes from the previous APIs and they should be noted before proceeding with development work. .. warning:: This API is currently marked as experimental. It is not supported by the sahara python client. These instructions are included purely for developers who wish to help participate in the development effort. Enabling the experimental API ----------------------------- There are a few changes to the WSGI pipeline that must be made to enable the new v2 API. These changes will leave the 1.0 and 1.1 API versions in place and will not adjust their communication parameters. To begin, uncomment, or add, the following sections in your api-paste.ini file: .. sourcecode:: ini [app:sahara_apiv2] paste.app_factory = sahara.api.middleware.sahara_middleware:RouterV2.factory [filter:auth_validator_v2] paste.filter_factory = sahara.api.middleware.auth_valid:AuthValidatorV2.factory These lines define a new authentication filter for the v2 API, and define the application that will handle the new calls. With these new entries in the paste configuration, we can now enable them with the following changes to the api-paste.ini file: .. 
sourcecode:: ini [pipeline:sahara] pipeline = cors request_id acl auth_validator_v2 sahara_api [composite:sahara_api] use = egg:Paste#urlmap /: sahara_apiv2 There are 2 significant changes occurring here; changing the authentication validator in the pipeline, and changing the root "/" application to the new v2 handler. At this point the sahara API server should be configured to accept requests on the new v2 endpoints. Communicating with the v2 API ----------------------------- The v2 API makes at least one major change from the previous versions, removing the OpenStack project identifier from the URL. Now users of the API do not provide their project ID explictly; instead we fully trust keystonemiddeware to provide it in the WSGI environment based on the given user token. For example, in previous versions of the API, a call to get the list of clusters for project "12345678-1234-1234-1234-123456789ABC" would have been made as follows:: GET /v1.1/12345678-1234-1234-1234-123456789ABC/clusters X-Auth-Token: {valid auth token} This call would now be made to the following URL:: GET /v2/clusters X-Auth-Token: {valid auth token} Using a tool like `HTTPie `_, the same request could be made like this:: $ httpie http://{sahara service ip:port}/v2/clusters \ X-Auth-Token:{valid auth token} Following the implementation progress ------------------------------------- As the creation of this API will be under regular change until it moves out of the experimental phase, a wiki page has been established to help track the progress. https://wiki.openstack.org/wiki/Sahara/api-v2 This page will help to coordinate the various reviews, specs, and work items that are a continuing facet of this work. The API service layer --------------------- When contributing to the version 2 API, it will be necessary to add code that modifies the data and behavior of HTTP calls as they are sent to and from the processing engine and data abstraction layers. Most frequently in the sahara codebase, these interactions are handled in the modules of the ``sahara.service.api`` package. This package contains code for all versions of the API and follows a namespace mapping that is similar to the routing functions of ``sahara.api`` Although these modules are not the definitive end of all answers to API related code questions, they are a solid starting point when examining the extent of new work. Furthermore, they serve as a central point to begin API debugging efforts when the need arises. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/contributor/contributing.rst0000664000175000017500000000476100000000000022750 0ustar00zuulzuul00000000000000============================ So You Want to Contribute... ============================ For general information on contributing to OpenStack, please check out the `contributor guide `_ to get started. It covers all the basics that are common to all OpenStack projects: the accounts you need, the basics of interacting with our Gerrit review system, how we communicate as a community, etc. Below will cover the more project specific information you need to get started with Sahara. Communication ~~~~~~~~~~~~~ * If you have something to discuss use `OpenStack development mail-list `_. 
Prefix the mail subject with ``[sahara]`` * Join ``#openstack-sahara`` IRC channel on `OFTC `_ * Attend Sahara team meetings * Weekly on Thursdays at 1400 UTC * IRC channel: ``#openstack-meeting-3`` Contacting the Core Team ~~~~~~~~~~~~~~~~~~~~~~~~ * The core team has coverage in the timezones of Europe and the Americas. * Just pop over to IRC; we keep a close eye on it! * You can also find the email addresses of the core team `here https://review.opendev.org/#/admin/groups/133,members>`. New Feature Planning ~~~~~~~~~~~~~~~~~~~~ Sahara uses specs to track feature requests. They provide a high-level summary of proposed changes and track associated commits. Sahara also uses specs for in-depth descriptions and discussions of blueprints. Specs follow a defined format and are submitted as change requests to the openstack/sahara-specs repository. Task Tracking ~~~~~~~~~~~~~ We track our tasks in Storyboard. The Sahara project group homepage on Storyboard is https://storyboard.openstack.org/#!/project_group/sahara. If you're looking for some smaller, easier work item to pick up and get started on, search for the 'low-hanging-fruit' or 'new-contributor' tag. Reporting a Bug ~~~~~~~~~~~~~~~ You found an issue and want to make sure we are aware of it? You can do so on https://storyboard.openstack.org/#!/project_group/sahara. Getting Your Patch Merged ~~~~~~~~~~~~~~~~~~~~~~~~~ Typically two +2s are required before merging. Project Team Lead Duties ~~~~~~~~~~~~~~~~~~~~~~~~ If you are the PTL of Sahara then you should follow the `PTL guide `_. You should also keep track of new versions of the various Hadoop distros/components coming out (this can also be delegated to another contributor, but the PTL needs to track it either way). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/contributor/dashboard-dev-environment-guide.rst0000664000175000017500000000753000000000000026376 0ustar00zuulzuul00000000000000Sahara UI Dev Environment Setup =============================== This page describes how to setup Horizon for developing Sahara by either installing it as part of DevStack with Sahara or installing it in an isolated environment and running from the command line. Install as a part of DevStack ----------------------------- See the `DevStack guide `_ for more information on installing and configuring DevStack with Sahara. Sahara UI can be installed as a DevStack plugin by adding the following line to your ``local.conf`` file .. sourcecode:: bash # Enable sahara-dashboard enable_plugin sahara-dashboard https://opendev.org/openstack/sahara-dashboard Isolated Dashboard for Sahara ----------------------------- These installation steps serve two purposes: 1. Setup a dev environment 2. Setup an isolated Dashboard for Sahara **Note** The host where you are going to perform installation has to be able to connect to all OpenStack endpoints. You can list all available endpoints using the following command: .. sourcecode:: console $ openstack endpoint list You can list the registered services with this command: .. sourcecode:: console $ openstack service list Sahara service should be present in keystone service list with service type *data-processing* 1. Install prerequisites .. sourcecode:: console $ sudo apt-get update $ sudo apt-get install git-core python-dev gcc python-setuptools \ python-virtualenv node-less libssl-dev libffi-dev libxslt-dev .. On Ubuntu 12.10 and higher you have to install the following lib as well: .. 
sourcecode:: console $ sudo apt-get install nodejs-legacy .. 2. Checkout Horizon from git and switch to your version of OpenStack Here is an example: .. sourcecode:: console $ git clone https://opendev.org/openstack/horizon/ {HORIZON_DIR} .. Then install the virtual environment: .. sourcecode:: console $ python {HORIZON_DIR}/tools/install_venv.py .. 3. Create a ``local_settings.py`` file .. sourcecode:: console $ cp {HORIZON_DIR}/openstack_dashboard/local/local_settings.py.example \ {HORIZON_DIR}/openstack_dashboard/local/local_settings.py .. 4. Modify ``{HORIZON_DIR}/openstack_dashboard/local/local_settings.py`` Set the proper values for host and url variables: .. sourcecode:: python OPENSTACK_HOST = "ip of your controller" .. If you wish to disable floating IP options during node group template creation, add the following parameter: .. sourcecode:: python SAHARA_FLOATING_IP_DISABLED = True .. 5. Clone sahara-dashboard repository and checkout the desired branch .. sourcecode:: console $ git clone https://opendev.org/openstack/sahara-dashboard/ \ {SAHARA_DASHBOARD_DIR} .. 6. Copy plugin-enabling files from sahara-dashboard repository to horizon .. sourcecode:: console $ cp -a {SAHARA_DASHBOARD_DIR}/sahara_dashboard/enabled/* {HORIZON_DIR}/openstack_dashboard/local/enabled/ .. 7. Install sahara-dashboard project into your horizon virtualenv in editable mode .. sourcecode:: console $ . {HORIZON_DIR}/.venv/bin/activate $ pip install -e {SAHARA_DASHBOARD_DIR} .. 8. Start Horizon .. sourcecode:: console $ . {HORIZON_DIR}/.venv/bin/activate $ python {HORIZON_DIR}/manage.py runserver 0.0.0.0:8080 .. This will start Horizon in debug mode. That means the logs will be written to console and if any exceptions happen, you will see the stack-trace rendered as a web-page. Debug mode can be disabled by changing ``DEBUG=True`` to ``False`` in ``local_settings.py``. In that case Horizon should be started slightly differently, otherwise it will not serve static files: .. sourcecode:: console $ . {HORIZON_DIR}/.venv/bin/activate $ python {HORIZON_DIR}/manage.py runserver --insecure 0.0.0.0:8080 .. .. note:: It is not recommended to use Horizon in this mode for production. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/contributor/development-environment.rst0000664000175000017500000000671700000000000025130 0ustar00zuulzuul00000000000000Setting Up a Development Environment ==================================== This page describes how to setup a Sahara development environment by either installing it as a part of DevStack or pointing a local running instance at an external OpenStack. You should be able to debug and test your changes without having to deploy Sahara. Setup a Local Environment with Sahara inside DevStack ----------------------------------------------------- See :doc:`the main article `. Setup a Local Environment with an external OpenStack ---------------------------------------------------- 1. Install prerequisites On OS X Systems: .. sourcecode:: console # we actually need pip, which is part of python package $ brew install python mysql postgresql rabbitmq $ pip install virtualenv tox On Ubuntu: .. sourcecode:: console $ sudo apt-get update $ sudo apt-get install git-core python-dev python-virtualenv gcc libpq-dev libmysqlclient-dev python-pip rabbitmq-server $ sudo pip install tox On Red Hat and related distributions (CentOS/Fedora/RHEL/Scientific Linux): .. 
sourcecode:: console $ sudo yum install git-core python-devel python-virtualenv gcc python-pip mariadb-devel postgresql-devel erlang $ sudo pip install tox $ sudo wget http://www.rabbitmq.com/releases/rabbitmq-server/v3.2.2/rabbitmq-server-3.2.2-1.noarch.rpm $ sudo rpm --import http://www.rabbitmq.com/rabbitmq-signing-key-public.asc $ sudo yum install rabbitmq-server-3.2.2-1.noarch.rpm On openSUSE-based distributions (SLES 12, openSUSE, Factory or Tumbleweed): .. sourcecode:: console $ sudo zypper in gcc git libmysqlclient-devel postgresql-devel python-devel python-pip python-tox python-virtualenv 2. Grab the code .. sourcecode:: console $ git clone https://opendev.org/openstack/sahara.git $ cd sahara 3. Generate Sahara sample using tox .. sourcecode:: console tox -e genconfig 4. Create config file from the sample .. sourcecode:: console $ cp ./etc/sahara/sahara.conf.sample ./etc/sahara/sahara.conf 5. Look through the sahara.conf and modify parameter values as needed For details see :doc:`Sahara Configuration Guide <../admin/configuration-guide>` 6. Create database schema .. sourcecode:: console $ tox -e venv -- sahara-db-manage --config-file etc/sahara/sahara.conf upgrade head 7. To start Sahara API and Engine processes call .. sourcecode:: console $ tox -e venv -- sahara-api --config-file etc/sahara/sahara.conf --debug $ tox -e venv -- sahara-engine --config-file etc/sahara/sahara.conf --debug Setup local OpenStack dashboard with Sahara plugin -------------------------------------------------- .. toctree:: :maxdepth: 1 dashboard-dev-environment-guide Tips and tricks for dev environment ----------------------------------- 1. Pip speedup Add the following lines to ~/.pip/pip.conf .. sourcecode:: cfg [global] download-cache = /home//.pip/cache index-url = Note that the ``~/.pip/cache`` folder should be created manually. 2. Git hook for fast checks Just add the following lines to .git/hooks/pre-commit and do chmod +x for it. .. sourcecode:: console #!/bin/sh # Run fast checks (PEP8 style check and PyFlakes fast static analysis) tox -epep8 You can add also other checks for pre-push, for example pylint (see below) and tests (tox -epy27). 3. Running static analysis (PyLint) Just run the following command .. sourcecode:: console tox -e pylint ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/contributor/development-guidelines.rst0000664000175000017500000002053300000000000024704 0ustar00zuulzuul00000000000000Development Guidelines ====================== Coding Guidelines ----------------- For all the Python code in Sahara we have a rule - it should pass `PEP 8`_. All Bash code should pass `bashate`_. To check your code against PEP 8 and bashate run: .. sourcecode:: console $ tox -e pep8 .. note:: For more details on coding guidelines see file ``HACKING.rst`` in the root of Sahara repo. Static analysis --------------- The static analysis checks are optional in Sahara, but they are still very useful. The gate job will inform you if the number of static analysis warnings has increased after your change. We recommend to always check the static warnings. To run check first commit your change, then execute the following command: .. sourcecode:: console $ tox -e pylint Modification of Upstream Files ------------------------------ We never modify upstream files in Sahara. Any changes in upstream files should be made in the upstream project and then merged back in to Sahara. 
This includes whitespace changes, comments, and typos. Any change requests containing upstream file modifications are almost certain to receive lots of negative reviews. Be warned. Examples of upstream files are default xml configuration files used to configure Hadoop, or code imported from the OpenStack Oslo project. The xml files will usually be found in ``resource`` directories with an accompanying ``README`` file that identifies where the files came from. For example: .. sourcecode:: console $ pwd /home/me/sahara/sahara/plugins/vanilla/v2_7_1/resources $ ls core-default.xml hdfs-default.xml oozie-default.xml README.rst create_oozie_db.sql mapred-default.xml post_conf.template yarn-default.xml .. Testing Guidelines ------------------ Sahara has a suite of tests that are run on all submitted code, and it is recommended that developers execute the tests themselves to catch regressions early. Developers are also expected to keep the test suite up-to-date with any submitted code changes. Unit tests are located at ``sahara/tests/unit``. Sahara's suite of unit tests can be executed in an isolated environment with `Tox`_. To execute the unit tests run the following from the root of Sahara repo: .. sourcecode:: console $ tox -e py27 Documentation Guidelines ------------------------ All Sahara docs are written using Sphinx / RST and located in the main repo in the ``doc`` directory. You can add or edit pages here to update the https://docs.openstack.org/sahara/latest/ site. The documentation in docstrings should follow the `PEP 257`_ conventions (as mentioned in the `PEP 8`_ guidelines). More specifically: 1. Triple quotes should be used for all docstrings. 2. If the docstring is simple and fits on one line, then just use one line. 3. For docstrings that take multiple lines, there should be a newline after the opening quotes, and before the closing quotes. 4. `Sphinx`_ is used to build documentation, so use the restructured text markup to designate parameters, return values, etc. Run the following command to build docs locally. .. sourcecode:: console $ tox -e docs After it you can access generated docs in ``doc/build/`` directory, for example, main page - ``doc/build/html/index.html``. To make the doc generation process faster you can use: .. sourcecode:: console $ SPHINX_DEBUG=1 tox -e docs To avoid sahara reinstallation to virtual env each time you want to rebuild docs you can use the following command (it can be executed only after running ``tox -e docs`` first time): .. sourcecode:: console $ SPHINX_DEBUG=1 .tox/docs/bin/python setup.py build_sphinx .. note:: For more details on documentation guidelines see HACKING.rst in the root of the Sahara repo. .. _PEP 8: http://www.python.org/dev/peps/pep-0008/ .. _bashate: https://opendev.org/openstack/bashate .. _PEP 257: http://www.python.org/dev/peps/pep-0257/ .. _Tox: http://tox.testrun.org/ .. _Sphinx: http://sphinx.pocoo.org/markup/index.html Event log Guidelines -------------------- Currently Sahara keeps useful information about provisioning for each cluster. Cluster provisioning can be represented as a linear series of provisioning steps, which are executed one after another. Each step may consist of several events. The number of events depends on the step and the number of instances in the cluster. Also each event can contain information about its cluster, instance, and node group. In case of errors, events contain useful information for identifying the error. 
Additionally, each exception in sahara contains a unique identifier that allows the user to find extra information about that error in the sahara logs. You can see an example of provisioning progress information here: https://docs.openstack.org/api-ref/data-processing/#event-log This means that if you add some important phase for cluster provisioning to the sahara code, it's recommended to add a new provisioning step for this phase. This will allow users to use event log for handling errors during this phase. Sahara already has special utils for operating provisioning steps and events in the module ``sahara/utils/cluster_progress_ops.py``. .. note:: It's strictly recommended not to use ``conductor`` event log ops directly to assign events and operate provisioning steps. .. note:: You should not start a new provisioning step until the previous step has successfully completed. .. note:: It's strictly recommended to use ``event_wrapper`` for event handling. OpenStack client usage guidelines --------------------------------- The sahara project uses several OpenStack clients internally. These clients are all wrapped by utility functions which make using them more convenient. When developing sahara, if you need to use an OpenStack client you should check the ``sahara.utils.openstack`` package for the appropriate one. When developing new OpenStack client interactions in sahara, it is important to understand the ``sahara.service.sessions`` package and the usage of the keystone ``Session`` and auth plugin objects (for example, ``Token`` and ``Password``). Sahara is migrating all clients to use this authentication methodology, where available. For more information on using sessions with keystone, please see :keystoneauth-doc:`the keystoneauth documentation ` Storing sensitive information ----------------------------- During the course of development, there is often cause to store sensitive information (for example, login credentials) in the records for a cluster, job, or some other record. Storing secret information this way is **not** safe. To mitigate the risk of storing this information, sahara provides access to the OpenStack Key Manager service (implemented by the :barbican-doc:`barbican project <>`) through the :castellan-doc:`castellan library <>`. To utilize the external key manager, the functions in ``sahara.service.castellan.utils`` are provided as wrappers around the castellan library. These functions allow a developer to store, retrieve, and delete secrets from the manager. Secrets that are managed through the key manager have an identifier associated with them. These identifiers are considered safe to store in the database. The following are some examples of working with secrets in the sahara codebase. These examples are considered basic, any developer wishing to learn more about the advanced features of storing secrets should look to the code and docstrings contained in the ``sahara.service.castellan`` module. **Storing a secret** .. sourcecode:: python from sahara.service.castellan import utils as key_manager password = 'SooperSecretPassword' identifier = key_manager.store_secret(password) **Retrieving a secret** .. sourcecode:: python from sahara.service.castellan import utils as key_manager password = key_manager.get_secret(identifier) **Deleting a secret** .. 
sourcecode:: python from sahara.service.castellan import utils as key_manager key_manager.delete_secret(identifier) When storing secrets through this interface it is important to remember that if an external key manager is being used, each stored secret creates an entry in an external service. When you are finished using the secret it is good practice to delete it, as not doing so may leave artifacts in those external services. For more information on configuring sahara to use the OpenStack Key Manager service, see :ref:`external_key_manager_usage`. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/contributor/devstack.rst0000664000175000017500000001340200000000000022035 0ustar00zuulzuul00000000000000Setup DevStack ============== DevStack can be installed on Fedora, Ubuntu, and CentOS. For supported versions see `DevStack documentation `_ We recommend that you install DevStack in a VM, rather than on your main system. That way you may avoid contamination of your system. You may find hypervisor and VM requirements in the next section. If you still want to install DevStack on your baremetal system, just skip the next section and read further. Start VM and set up OS ---------------------- In order to run DevStack in a local VM, you need to start by installing a guest with Ubuntu 14.04 server. Download an image file from `Ubuntu's web site `_ and create a new guest from it. Virtualization solution must support nested virtualization. Without nested virtualization VMs running inside the DevStack will be extremely slow lacking hardware acceleration, i.e. you will run QEMU VMs without KVM. On Linux QEMU/KVM supports nested virtualization, on Mac OS - VMware Fusion. VMware Fusion requires adjustments to run VM with fixed IP. You may find instructions which can help :ref:`below `. Start a new VM with Ubuntu Server 14.04. Recommended settings: - Processor - at least 2 cores - Memory - at least 8GB - Hard Drive - at least 60GB When allocating CPUs and RAM to the DevStack, assess how big clusters you want to run. A single Hadoop VM needs at least 1 cpu and 1G of RAM to run. While it is possible for several VMs to share a single cpu core, remember that they can't share the RAM. After you installed the VM, connect to it via SSH and proceed with the instructions below. Install DevStack ---------------- The instructions assume that you've decided to install DevStack into Ubuntu 14.04 system. **Note:** Make sure to use bash, as other shells are not fully compatible and may cause hard to debug problems. 1. Clone DevStack: .. sourcecode:: console $ sudo apt-get install git-core $ git clone https://opendev.org/openstack/devstack.git 2. Create the file ``local.conf`` in devstack directory with the following content: .. sourcecode:: bash [[local|localrc]] ADMIN_PASSWORD=nova MYSQL_PASSWORD=nova RABBIT_PASSWORD=nova SERVICE_PASSWORD=$ADMIN_PASSWORD SERVICE_TOKEN=nova # Enable Swift enable_service s-proxy s-object s-container s-account SWIFT_HASH=66a3d6b56c1f479c8b4e70ab5c2000f5 SWIFT_REPLICAS=1 SWIFT_DATA_DIR=$DEST/data # Force checkout prerequisites # FORCE_PREREQ=1 # keystone is now configured by default to use PKI as the token format # which produces huge tokens. # set UUID as keystone token format which is much shorter and easier to # work with. KEYSTONE_TOKEN_FORMAT=UUID # Change the FLOATING_RANGE to whatever IPs VM is working in. 
# In NAT mode it is the subnet VMware Fusion provides, in bridged mode # it is your local network. But only use the top end of the network by # using a /27 and starting at the 224 octet. FLOATING_RANGE=192.168.55.224/27 # Set ``OFFLINE`` to ``True`` to configure ``stack.sh`` to run cleanly # without Internet access. ``stack.sh`` must have been previously run # with Internet access to install prerequisites and fetch repositories. # OFFLINE=True # Enable sahara enable_plugin sahara https://opendev.org/openstack/sahara # Enable heat enable_plugin heat https://opendev.org/openstack/heat In cases where you need to specify a git refspec (branch, tag, or commit hash) for the sahara in-tree devstack plugin (or sahara repo), it should be appended to the git repo URL as follows: .. sourcecode:: bash enable_plugin sahara https://opendev.org/openstack/sahara 3. Sahara can send notifications to Ceilometer, if Ceilometer is enabled. If you want to enable Ceilometer add the following lines to the ``local.conf`` file: .. sourcecode:: bash enable_plugin ceilometer https://opendev.org/openstack/ceilometer 4. Start DevStack: .. sourcecode:: console $ ./stack.sh 5. Once the previous step is finished Devstack will print a Horizon URL. Navigate to this URL and login with login "admin" and password from ``local.conf``. 6. Congratulations! You have OpenStack running in your VM and you're ready to launch VMs inside that VM. :) Managing sahara in DevStack --------------------------- If you install DevStack with sahara included you can rejoin screen with the ``screen -c stack-screenrc`` command and switch to the ``sahara`` tab. Here you can manage the sahara service as other OpenStack services. Sahara source code is located at ``$DEST/sahara`` which is usually ``/opt/stack/sahara``. .. _fusion-fixed-ip: Setting fixed IP address for VMware Fusion VM --------------------------------------------- 1. Open file ``/Library/Preferences/VMware Fusion/vmnet8/dhcpd.conf`` 2. There is a block named "subnet". It might look like this: .. sourcecode:: text subnet 192.168.55.0 netmask 255.255.255.0 { range 192.168.55.128 192.168.55.254; 3. You need to pick an IP address outside of that range. For example - ``192.168.55.20`` 4. Copy VM MAC address from VM settings->Network->Advanced 5. Append the following block to file ``dhcpd.conf`` (don't forget to replace ``VM_HOSTNAME`` and ``VM_MAC_ADDRESS`` with actual values): .. sourcecode:: text host VM_HOSTNAME { hardware ethernet VM_MAC_ADDRESS; fixed-address 192.168.55.20; } 6. Now quit all the VMware Fusion applications and restart vmnet: .. sourcecode:: console $ sudo /Applications/VMware\ Fusion.app/Contents/Library/vmnet-cli --stop $ sudo /Applications/VMware\ Fusion.app/Contents/Library/vmnet-cli --start 7. Now start your VM; it should have new fixed IP address. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/contributor/gerrit.rst0000664000175000017500000000101300000000000021520 0ustar00zuulzuul00000000000000Code Reviews with Gerrit ======================== Sahara uses the `Gerrit`_ tool to review proposed code changes. The review site is https://review.opendev.org. Gerrit is a complete replacement for Github pull requests. `All Github pull requests to the Sahara repository will be ignored`. See `Development Workflow`_ for information about how to get started using Gerrit. .. _Gerrit: http://code.google.com/p/gerrit .. 
_Development Workflow: https://docs.openstack.org/infra/manual/developers.html#development-workflow ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/contributor/how-to-build-oozie.rst0000664000175000017500000000347400000000000023676 0ustar00zuulzuul00000000000000How to build Oozie ================== .. note:: Apache does not make Oozie builds, so it has to be built manually. Download -------- * Download the tarball from the `Apache mirror `_ * Unpack it with .. sourcecode:: console $ tar -xzvf oozie-4.3.1.tar.gz Hadoop Versions --------------- To build Oozie, the following command can be used: .. sourcecode:: console $ {oozie_dir}/bin/mkdistro.sh -DskipTests By default it builds against Hadoop 1.1.1. To build it with Hadoop version 2.x: * The hadoop-2 version should be changed in pom.xml. This can be done manually or with the following command (you should replace 2.x.x with your Hadoop version): .. sourcecode:: console $ find . -name pom.xml | xargs sed -ri 's/2.3.0/2.x.x/' * The build command should be launched with the ``-P hadoop-2`` flag JDK Versions ------------ By default, the build configuration enforces that JDK 1.6.* is being used. There are two build properties that can be used to change the JDK version requirements: * ``javaVersion`` specifies the version of the JDK used to compile (default 1.6). * ``targetJavaVersion`` specifies the version of the generated bytecode (default 1.6). For example, to specify JDK version 1.7, the build command should contain the ``-D javaVersion=1.7 -D targetJavaVersion=1.7`` flags. Build ----- To build Oozie with Hadoop 2.6.0 and JDK version 1.7, the following command can be used: .. sourcecode:: console $ {oozie_dir}/bin/mkdistro.sh assembly:single -P hadoop-2 -D javaVersion=1.7 -D targetJavaVersion=1.7 -D skipTests Also, the Pig version can be passed as a Maven property with the flag ``-D pig.version=x.x.x``. You can find similar instructions to build oozie.tar.gz here: http://oozie.apache.org/docs/4.3.1/DG_QuickStart.html#Building_Oozie ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/contributor/image-gen.rst0000664000175000017500000003073100000000000022066 0ustar00zuulzuul00000000000000Image Generation ================ As of Newton, Sahara supports the creation of image generation and image validation tooling as part of the plugin. If implemented properly, this feature will enable your plugin to: * Validate that images passed to it for use in cluster provisioning meet its specifications. * Provision images from "clean" (OS-only) images. * Pack pre-populated images for registration in Glance and use by Sahara. All of these features can use the same image declaration, meaning that logic for these three use cases can be maintained in one place. This guide will explain how to enable this feature for your plugin, as well as how to write or modify the image generation manifests that this feature uses. Image Generation CLI -------------------- The key user-facing interface to this feature is the CLI script ``sahara-image-pack``. This script will be installed with all other Sahara binaries. The usage of the CLI script ``sahara-image-pack`` is documented in the :ref:`sahara-image-pack-label` section of the user guide. The Image Manifest ------------------ As you'll read in the next section, Sahara's image packing tools allow plugin authors to use any toolchain they choose.
However, Sahara does provide a built-in image packing framework which is uniquely suited to OpenStack use cases, as it is designed to run the same logic while pre-packing an image or while preparing an instance to launch a cluster after it is spawned in OpenStack. By convention, the image specification, and all the scripts that it calls, should be located in the plugin's resources directory under a subdirectory named "images". A sample specification is below; the example is reasonably silly in practice, and is only designed to highlight the use of the currently available validator types. We'll go through each piece of this specification, but the full sample is presented for context. :: arguments: java-distro: description: The java distribution. default: openjdk required: false choices: - oracle-java - openjdk validators: - os_case: - redhat: - package: nfs-utils - debian: - package: nfs-common - argument_case: argument_name: java-distro cases: openjdk: - any: - all: - package: java-1.8.0-openjdk-devel - argument_set: argument_name: java-version value: 1.8.0 - all: - package: java-1.7.0-openjdk-devel - argument_set: argument_name: java-version value: 1.7.0 oracle-java: - script: install_oracle_java.sh - script: setup_java.sh - package: - hadoop - hadoop-libhdfs - hadoop-native - hadoop-pipes - hadoop-sbin - hadoop-lzo - lzo - lzo-devel - hadoop-lzo-native The Arguments Section --------------------- First, the image specification should describe any arguments that may be used to adjust properties of the image: :: arguments: # The section header - java-distro: # The friendly name of the argument, and the name of the variable passed to scripts description: The java distribution. # A friendly description to be used in help text default: openjdk # A default value for the argument required: false # Whether or not the argument is required choices: # The argument value must match an element of this list - oracle-java - openjdk Specifications may contain any number of arguments, as declared above, by adding more members to the list under the ``arguments`` key. The Validators Section ---------------------- This is where the logical flow of the image packing and validation process is declared. A tiny example validator list is specified below. :: validators: - package: nfs-utils - script: setup_java.sh This is fairly straightforward: this specification will install the nfs-utils package (or check that it's present) and then run the ``setup_java.sh`` script. All validators may be run in two modes: reconcile mode and test-only mode (reconcile == false). If validators are run in reconcile mode, any image or instance state which is not already true will be updated, if possible. If validators are run in test-only mode, they will only test the image or instance, and will raise an error if this fails. We'll now go over the types of validators that are currently available in Sahara. This framework is made to easily allow new validators to be created and old ones to be extended: if there's something you need, please do file a wishlist bug or write and propose your own! Action validators ----------------- These validators take specific, concrete actions to assess or modify your image or instance. The Package Validator ~~~~~~~~~~~~~~~~~~~~~ This validator type will install a package on the image, or validate that a package is installed on the image. 
It can take several formats, as below: :: validators: - package: hadoop - package: - hadoop-libhdfs - nfs-utils: version: 1.3.3-8 As you can see, a package declaration can consist of: * The package name as a string * A list of packages, any of which may be: * The package name as a string * A dict with the package name as a key and a version property The Script Validator ~~~~~~~~~~~~~~~~~~~~ This validator will run a script on the image. It can take several formats as well: :: validators: - script: simple_script.sh # Runs this file - script: set_java_home: # The name of a script file arguments: # Only the named environment arguments are passed, for clarity - jdk-home - jre-home output: OUTPUT_VAR - script: store_nfs_version: # Because inline is set, this is just a friendly name inline: rpm -q nfs-utils # Runs this text directly, rather than reading a file output: nfs-version # Places the stdout of this script into an argument # for future scripts to consume; if none exists, the # argument is created Two variables are always available to scripts run under this framework: * ``distro``: The distro of the image, in case you want to switch on distro within your script (rather than by using the os_case validator). * ``test_only``: If this value equates to boolean false, then the script should attempt to change the image or instance if it does not already meet the specification. If this equates to boolean true, the script should exit with a failure code if the image or instance does not already meet the specification. Flow Control Validators ----------------------- These validators are used to build more complex logic into your specifications explicitly in the yaml layer, rather than by deferring too much logic to scripts. The OS Case Validator ~~~~~~~~~~~~~~~~~~~~~ This validator runs different logic depending on which distribution of Linux is being used in the guest. :: validators: - os_case: # The contents are expressed as a list, not a dict, to preserve order - fedora: # Only the first match runs, so put distros before families - package: nfs_utils # The content of each case is a list of validators - redhat: # Red Hat distros include fedora, centos, and rhel - package: nfs-utils - debian: # The major supported Debian distro in Sahara is ubuntu - package: nfs-common The Argument Case Validator ~~~~~~~~~~~~~~~~~~~~~~~~~~~ This validator runs different logic depending on the value of an argument. :: validators: - argument_case: argument_name: java-distro # The name of the argument cases: # The cases are expressed as a dict, as only one can equal the argument's value openjdk: - script: setup-openjdk # The content of each case is a list of validators oracle-java: - script: setup-oracle-java The All Validator ~~~~~~~~~~~~~~~~~ This validator runs all the validators within it, as one logical block. If any validators within it fail to validate or modify the image or instance, it will fail. :: validators: - all: - package: nfs-utils - script: setup-nfs.sh The Any Validator ~~~~~~~~~~~~~~~~~ This validator attempts to run each validator within it, until one succeeds, and will report success if any do. If this is run in reconcile mode, it will first try each validator in test-only mode, and will succeed without making changes if any succeed (in the case below, if openjdk 1.7.0 were already installed, the validator would succeed and would not install 1.8.0.) 
:: validators: - any: # This validator will try to install openjdk-1.8.0, but it will settle for 1.7.0 if that fails - package: java-1.8.0-openjdk-devel - package: java-1.7.0-openjdk-devel The Argument Set Validator ~~~~~~~~~~~~~~~~~~~~~~~~~~ You may find that you wish to store state in one place in the specification for use in another. In this case, you can use this validator to set an argument for future use. :: validators: - argument_set: argument_name: java-version value: 1.7.0 SPI Methods ----------- In order to make this feature available for your plugin, you must implement the following optional plugin SPI methods. When implementing these, you may choose to use your own framework of choice (Packer for image packing, etc.) By doing so, you can ignore the entire framework and specification language described above. However, you may wish to instead use the abstraction we've provided (its ability to keep logic in one place for both image packing and cluster validation is useful in the OpenStack context.) We will, of course, focus on that framework here. :: def get_image_arguments(self, hadoop_version): """Gets the argument set taken by the plugin's image generator""" def pack_image(self, hadoop_version, remote, test_only=False, image_arguments=None): """Packs an image for registration in Glance and use by Sahara""" def validate_images(self, cluster, test_only=False, image_arguments=None): """Validates the image to be used by a cluster""" The validate_images method is called after Heat provisioning of your cluster, but before cluster configuration. If the test_only keyword of this method is set to True, the method should only test the instances without modification. If it is set to False, the method should make any necessary changes (this can be used to allow clusters to be spun up from clean, OS-only images.) This method is expected to use an ssh remote to communicate with instances, as per normal in Sahara. The pack_image method can be used to modify an image file (it is called by the CLI above). This method expects an ImageRemote, which is essentially a libguestfs handle to the disk image file, allowing commands to be run on the image directly (though it could be any concretion that allows commands to be run against the image.) By this means, the validators described above can execute the same logic in the image packing, instance validation, and instance preparation cases with the same degree of interactivity and logical control. In order to future-proof this document against possible changes, the doctext of these methods will not be reproduced here, but they are documented very fully in the sahara.plugins.provisioning abstraction. These abstractions can be found in the module sahara.plugins.images. You will find that the framework has been built with extensibility and abstraction in mind: you can overwrite validator types, add your own without modifying any core sahara modules, declare hierarchies of resource locations for shared resources, and more. These features are documented in the sahara.plugins.images module itself (which has copious doctext,) and we encourage you to explore and ask questions of the community if you are curious or wish to build your own image generation tooling. 
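To make the shape of these hooks concrete, here is a minimal sketch of how a plugin's version-specific module might wire the three SPI methods to the YAML-driven framework described above; the plugin class's SPI methods would simply delegate to these functions. The spec paths are placeholders, and the helper names (``SaharaImageValidator.from_yaml``, ``get_argument_list``, ``validate``) reflect how existing plugins are commonly structured; treat them as assumptions and consult the doctext in ``sahara.plugins.images`` for the authoritative interface.

.. sourcecode:: python

    # Illustrative sketch only; verify the helper names against the
    # doctext in sahara.plugins.images before relying on them.
    from sahara.plugins import images
    from sahara.plugins import utils as plugin_utils

    # Load the YAML image specification shipped in the plugin's resources
    # (the paths below are placeholders for your plugin's layout).
    _validator = images.SaharaImageValidator.from_yaml(
        'plugins/myplugin/v1_0/resources/images/image.yaml',
        resource_roots=['plugins/myplugin/v1_0/resources/images'])


    def get_image_arguments(hadoop_version):
        # Expose the arguments declared in the spec's "arguments" section.
        return _validator.get_argument_list()


    def pack_image(hadoop_version, remote, test_only=False,
                   image_arguments=None):
        # Called by sahara-image-pack with a libguestfs-backed ImageRemote;
        # reconcile mode (test_only=False) modifies the image as needed.
        _validator.validate(remote, test_only=test_only,
                            image_arguments=image_arguments)


    def validate_images(cluster, test_only=False, image_arguments=None):
        # Called after Heat provisioning; the same validators run over
        # each instance through an ssh remote.
        image_arguments = get_image_arguments(cluster.hadoop_version)
        for instance in plugin_utils.get_instances(cluster):
            with instance.remote() as r:
                _validator.validate(r, test_only=test_only,
                                    image_arguments=image_arguments)

Keeping all three entry points pointed at the same validator object is what lets a single specification drive image packing, instance validation, and instance preparation.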
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/contributor/index.rst0000664000175000017500000000072400000000000021343 0ustar00zuulzuul00000000000000===================== Developer Information ===================== Programming HowTos and Tutorials ================================ .. toctree:: :maxdepth: 2 development-guidelines development-environment devstack dashboard-dev-environment-guide how-to-build-oozie adding-database-migrations testing log-guidelines apiv2 image-gen Other Resources =============== .. toctree:: :maxdepth: 2 contributing gerrit jenkins ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/contributor/jenkins.rst0000664000175000017500000000307500000000000021677 0ustar00zuulzuul00000000000000Continuous Integration with Jenkins =================================== Each change made to Sahara core code is tested with unit and integration tests and style checks using flake8. Unit tests and style checks are performed on the public `OpenStack Zuul `_ instance. Unit tests are run using Python 2.7. The results of those checks and unit tests are represented as a vote of +1 or -1 in the *Verify* column in code reviews from the *Jenkins* user. Integration tests check CRUD operations for the Image Registry, Templates, and Clusters. Also, a test job is launched on a created cluster to verify that Hadoop works. All integration tests are launched by `Jenkins `_ on the internal Mirantis OpenStack Lab. Jenkins keeps a pool of VMs to run tests in parallel. Even with the pool of VMs, integration testing may take a while. Jenkins is controlled for the most part by Zuul, which determines which jobs are run and when. Zuul status is available at this address: `Zuul Status `_. For more information see: `Sahara Hadoop Cluster CI `_. The integration test result is represented as a vote of +1 or -1 in the *Verify* column in a code review from the *Sahara Hadoop Cluster CI* user. You can put *sahara-ci-recheck* in a comment if you want to recheck sahara-ci jobs. Also, you can put *recheck* in a comment if you want to recheck both Jenkins and sahara-ci jobs. Finally, you can put *reverify* in a comment if you only want to recheck Jenkins jobs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/contributor/log-guidelines.rst0000664000175000017500000000242000000000000023136 0ustar00zuulzuul00000000000000 Log Guidelines ============== Levels Guidelines ----------------- During the Kilo release cycle the sahara community defined the following log levels: * Debug: Shows everything and is likely not suitable for normal production operation due to the sheer size of logs generated (e.g. script executions, process execution, etc.). * Info: Usually indicates successful service start/stop, versions and such non-error related data. This should include largely positive units of work that are accomplished (e.g. service setup and configuration, cluster start, job execution information). * Warning: Indicates that there might be a systemic issue; potential predictive failure notice (e.g. job execution failed). * Error: An error has occurred and the administrator should research the error information (e.g. cluster failed to start, plugin violations of operation).
* Critical: An error has occurred and the system might be unstable, anything that eliminates part of sahara's intended functionalities; immediately get administrator assistance (e.g. failed to access keystone/database, failed to load plugin). Formatting Guidelines --------------------- Sahara uses string formatting defined in `PEP 3101`_ for logs. .. _PEP 3101: https://www.python.org/dev/peps/pep-3101/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/contributor/testing.rst0000664000175000017500000000201200000000000021701 0ustar00zuulzuul00000000000000Sahara Testing ============== We have a bunch of different tests for Sahara. Unit Tests ++++++++++ In most Sahara sub-repositories we have a directory that contains Python unit tests, located at `_package_/tests/unit` or `_package_/tests`. Scenario integration tests ++++++++++++++++++++++++++ New scenario integration tests were implemented for Sahara. They are available in the sahara-tests repository (https://opendev.org/openstack/sahara-tests). Tempest tests +++++++++++++ Sahara has a Tempest plugin in the sahara-tests repository covering all major API features. Additional tests ++++++++++++++++ Additional tests reside in the sahara-tests repository (as above): * REST API tests checking to ensure that the Sahara REST API works. The only parts that are not tested are cluster creation and EDP. * CLI tests check read-only operations using the Sahara CLI. For more information about these tests, please read `Tempest Integration of Sahara `_. ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.649891 sahara-16.0.0/doc/source/images/0000775000175000017500000000000000000000000016372 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/images/hadoop-cluster-example.jpg0000664000175000017500000011235000000000000023460 0ustar00zuulzuul00000000000000[binary JPEG image data omitted]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/images/openstack-interop.png0000664000175000017500000011060300000000000022546 0ustar00zuulzuul00000000000000[binary PNG image data omitted]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/images/sahara-architecture.svg0000664000175000017500000115000400000000000023033 0ustar00zuulzuul00000000000000[SVG image data omitted] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/index.rst0000664000175000017500000000175600000000000016773 0ustar00zuulzuul00000000000000Welcome to Sahara! ================== The sahara project aims to provide users with a simple means to provision data processing frameworks (such as Apache Hadoop, Apache Spark and Apache Storm) on OpenStack. This is accomplished by specifying configuration parameters such as the framework version, cluster topology, node hardware details and more. Overview -------- .. toctree:: :maxdepth: 2 intro/index Installation ------------ .. toctree:: :maxdepth: 2 install/index Configuration ------------- .. toctree:: :maxdepth: 2 configuration/index User Guide ---------- .. toctree:: :maxdepth: 2 user/index CLI Guide --------- .. toctree:: :maxdepth: 2 cli/index Operator Documentation ---------------------- .. toctree:: :maxdepth: 2 admin/index Contributor Documentation ------------------------- .. toctree:: :maxdepth: 2 contributor/index Programming Reference --------------------- .. toctree:: :maxdepth: 2 reference/index ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.649891 sahara-16.0.0/doc/source/install/0000775000175000017500000000000000000000000016573 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/install/dashboard-guide.rst0000664000175000017500000000530300000000000022350 0ustar00zuulzuul00000000000000Sahara Dashboard Configuration Guide ==================================== After installing the Sahara dashboard, there are a few extra configurations that can be made. Dashboard configurations are applied through Horizon's local_settings.py file. The sample configuration file is available `from the Horizon repository. `_ 1. Networking ------------- Depending on the Networking backend (Neutron) used in the cloud, Sahara panels will determine automatically which input fields should be displayed. If you wish to disable floating IP options during node group template creation, add the following parameter: Example: .. sourcecode:: python SAHARA_FLOATING_IP_DISABLED = True .. 2. Different endpoint --------------------- Sahara UI panels normally use ``data-processing`` endpoint from Keystone to talk to Sahara service. In some cases it may be useful to switch to another endpoint, for example use locally installed Sahara instead of the one on the OpenStack controller. To switch the UI to another endpoint the endpoint should be registered in the first place. Local endpoint example: .. code-block:: $ openstack service create --name sahara_local --description \ "Sahara Data Processing (local installation)" \ data_processing_local $ openstack endpoint create --region RegionOne \ data_processing_local public http://127.0.0.1:8386/v1.1/%\(project_id\)s $ openstack endpoint create --region RegionOne \ data_processing_local internal http://127.0.0.1:8386/v1.1/%\(project_id\)s $ openstack endpoint create --region RegionOne \ data_processing_local admin http://127.0.0.1:8386/v1.1/%\(project_id\)s ..
Then the endpoint name should be changed in ``sahara.py`` under the module of `sahara-dashboard/sahara_dashboard/api/sahara.py `__. .. sourcecode:: python # "type" of Sahara service registered in keystone SAHARA_SERVICE = 'data_processing_local' 3. Hiding health check info --------------------------- Sahara UI panels normally contain some information about cluster health. If the relevant functionality has been disabled in the Sahara service, then operators may prefer to not have any references to health at all in the UI, since there would not be any usable health information in that case. The visibility of health check info can be toggled via the ``SAHARA_VERIFICATION_DISABLED`` parameter, whose default value is False, meaning that the health check info will be visible. Example: .. sourcecode:: python SAHARA_VERIFICATION_DISABLED = True .. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/install/index.rst0000664000175000017500000000020100000000000020425 0ustar00zuulzuul00000000000000================== Installation Guide ================== .. toctree:: :maxdepth: 2 installation-guide dashboard-guide ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/install/installation-guide.rst0000664000175000017500000002046000000000000023123 0ustar00zuulzuul00000000000000Sahara Installation Guide ========================= We recommend installing sahara in a way that will keep your system in a consistent state. We suggest the following options: * Install via `Fuel `_ * Install via :kolla-ansible-doc:`Kolla <>` * Install via `RDO `_ * Install into a virtual environment To install with Fuel -------------------- 1. Start by following the `MOS Quickstart `_ to install and setup OpenStack. 2. Enable the sahara service during installation. To install with Kolla --------------------- 1. Start by following the :kolla-ansible-doc:`Kolla Quickstart ` to install and setup OpenStack. 2. Enable the sahara service during installation. To install with RDO ------------------- 1. Start by following the `RDO Quickstart `_ to install and setup OpenStack. 2. Install sahara: .. sourcecode:: console # yum install openstack-sahara .. 3. Configure sahara as needed. The configuration file is located in ``/etc/sahara/sahara.conf``. For details see :doc:`Sahara Configuration Guide <../admin/configuration-guide>` 4. Create the database schema: .. sourcecode:: console # sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head .. 5. Go through :ref:`common_installation_steps` and make any necessary changes. 6. Start the sahara-api and sahara-engine services: .. sourcecode:: console # systemctl start openstack-sahara-api # systemctl start openstack-sahara-engine .. 7. *(Optional)* Enable sahara services to start on boot .. sourcecode:: console # systemctl enable openstack-sahara-api # systemctl enable openstack-sahara-engine .. To install into a virtual environment ------------------------------------- 1. First you need to install a number of packages with your OS package manager. The list of packages depends on the OS you use. For Ubuntu run: .. sourcecode:: console $ sudo apt-get install python-setuptools python-virtualenv python-dev .. For Fedora: .. sourcecode:: console $ sudo yum install gcc python-setuptools python-virtualenv python-devel .. For CentOS: .. 
sourcecode:: console $ sudo yum install gcc python-setuptools python-devel $ sudo easy_install pip $ sudo pip install virtualenv 2. Set up a virtual environment for sahara: .. sourcecode:: console $ virtualenv sahara-venv .. This will install a python virtual environment into the ``sahara-venv`` directory in your current working directory. This command does not require super user privileges and can be executed in any directory where the current user has write permissions. 3. You can get a sahara archive from ``_ and install it using pip: .. sourcecode:: console $ sahara-venv/bin/pip install 'http://tarballs.openstack.org/sahara/sahara-master.tar.gz' .. Note that ``sahara-master.tar.gz`` contains the latest changes and might not be stable at the moment. We recommend browsing ``_ and selecting the latest stable release. To install a stable release, execute the following (replacing 'release' with the release name, e.g. 'mitaka'): .. sourcecode:: console $ sahara-venv/bin/pip install 'http://tarballs.openstack.org/sahara/sahara-stable-release.tar.gz' .. For example, you can get the Sahara Mitaka release by executing: .. sourcecode:: console $ sahara-venv/bin/pip install 'http://tarballs.openstack.org/sahara/sahara-stable-mitaka.tar.gz' .. 4. After installation you should create a configuration file; as seen below it is possible to generate a sample one: .. sourcecode:: console $ SAHARA_SOURCE_DIR="/path/to/sahara/source" $ pushd $SAHARA_SOURCE_DIR $ tox -e genconfig $ popd $ cp $SAHARA_SOURCE_DIR/etc/sahara/sahara.conf.sample sahara-venv/etc/sahara.conf .. Make any necessary changes to ``sahara-venv/etc/sahara.conf``. For details see :doc:`Sahara Configuration Guide <../admin/configuration-guide>` .. _common_installation_steps: Common installation steps ------------------------- The steps below are common to both the RDO and virtual environment installations of sahara. 1. If you use sahara with a MySQL database, then for storing big job binaries in the sahara internal database you must configure the size of the maximum allowed packet. Edit the ``my.cnf`` file and change the ``max_allowed_packet`` parameter as follows: .. sourcecode:: ini ... [mysqld] ... max_allowed_packet = 256M .. Then restart the mysql server to ensure these changes are active. 2. Create the database schema: .. sourcecode:: console $ sahara-venv/bin/sahara-db-manage --config-file sahara-venv/etc/sahara.conf upgrade head .. 3. Start the sahara services from different terminals: .. sourcecode:: console # first terminal $ sahara-venv/bin/sahara-api --config-file sahara-venv/etc/sahara.conf # second terminal $ sahara-venv/bin/sahara-engine --config-file sahara-venv/etc/sahara.conf .. .. _register-sahara-label: 4. For sahara to be accessible in the OpenStack Dashboard and for python-saharaclient to work properly you must register sahara in the Identity service catalog. For example: .. code-block:: $ openstack service create --name sahara --description \ "Sahara Data Processing" data-processing $ openstack endpoint create --region RegionOne \ data-processing public http://10.0.0.2:8386/v1.1/%\(project_id\)s $ openstack endpoint create --region RegionOne \ data-processing internal http://10.0.0.2:8386/v1.1/%\(project_id\)s $ openstack endpoint create --region RegionOne \ data-processing admin http://10.0.0.2:8386/v1.1/%\(project_id\)s .. note:: You have to install the openstack-client package in order to execute the ``openstack`` command. .. 5. For more information on configuring sahara with the OpenStack Dashboard please see :doc:`dashboard-guide`.
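At this point a quick sanity check can be useful. The commands below are only a suggestion: they assume the example endpoint address used above and, for the second command, that python-saharaclient is installed:

.. sourcecode:: console

    $ curl http://10.0.0.2:8386/

    $ openstack dataprocessing plugin list

..

The first command queries the API root directly and should return the supported API versions; the second exercises the registered endpoint through the OpenStack client plugin shipped with python-saharaclient.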
Optional installation of default templates ------------------------------------------ Sahara bundles default templates that define simple clusters for the supported plugins. These templates may optionally be added to the sahara database using a simple CLI included with sahara. The default template CLI is described in detail in a *README* file included with the sahara sources at ``/db/templates/README.rst`` but it is summarized here. Flavor id values must be specified for the default templates included with sahara. The recommended configuration values below correspond to the *m1.medium* and *m1.large* flavors in a default OpenStack installation (if these flavors have been edited, their corresponding values will be different). Values for flavor_id should be added to ``/etc/sahara/sahara.conf`` or another configuration file in the sections shown here: .. sourcecode:: ini [DEFAULT] # Use m1.medium for {flavor_id} unless specified in another section flavor_id = 2 [cdh-5-default-namenode] # Use m1.large for {flavor_id} in the cdh-5-default-namenode template flavor_id = 4 [cdh-530-default-namenode] # Use m1.large for {flavor_id} in the cdh-530-default-namenode template flavor_id = 4 The above configuration values are included in a sample configuration file at ``/plugins/default_templates/template.conf`` The command to install all of the default templates is as follows, where ``$PROJECT_ID`` should be a valid project id and the above configuration values have been set in ``myconfig``: .. sourcecode:: console $ sahara-templates --config-file /etc/sahara/sahara.conf --config-file myconfig update -t $PROJECT_ID Help is available from the ``sahara-templates`` command: .. sourcecode:: console $ sahara-templates --help $ sahara-templates update --help Notes: ------ Ensure that your operating system is not blocking the sahara port (default: 8386). You may need to configure iptables in CentOS and other Linux distributions to allow this access. To get the list of all possible options run: .. sourcecode:: console $ sahara-venv/bin/python sahara-venv/bin/sahara-api --help $ sahara-venv/bin/python sahara-venv/bin/sahara-engine --help .. Further, consider reading :doc:`../intro/overview` for general sahara concepts and :doc:`../user/plugins` for specific plugin features/requirements. ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.653891 sahara-16.0.0/doc/source/intro/0000775000175000017500000000000000000000000016260 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/intro/architecture.rst0000664000175000017500000000250700000000000021500 0ustar00zuulzuul00000000000000Architecture ============ .. image:: ../images/sahara-architecture.svg :width: 960 :height: 635 :scale: 83 % :align: left The Sahara architecture consists of several components: * Auth component - responsible for client authentication & authorization, communicates with the OpenStack Identity service (keystone). * DAL - Data Access Layer, persists internal models in DB. * Secure Storage Access Layer - persists the authentication data like passwords and private keys in a secure storage. * Provisioning Engine - component responsible for communication with the OpenStack Compute (nova), Orchestration (heat), Block Storage (cinder), Image (glance), and DNS (designate) services. 
* Vendor Plugins - pluggable mechanism responsible for configuring and launching data processing frameworks on provisioned VMs. Existing management solutions like Apache Ambari and Cloudera Management Console could be utilized for that purpose as well. * EDP - :doc:`../user/edp` responsible for scheduling and managing data processing jobs on clusters provisioned by sahara. * REST API - exposes sahara functionality via a REST HTTP interface. * Python Sahara Client - like other OpenStack components, sahara has its own python client. * Sahara pages - a GUI for sahara is located in the OpenStack Dashboard (horizon). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/intro/index.rst0000664000175000017500000000030400000000000020116 0ustar00zuulzuul00000000000000=============== Sahara Overview =============== General overview of Sahara. .. toctree:: :maxdepth: 2 overview architecture Roadmap ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/intro/overview.rst0000664000175000017500000002037400000000000020666 0ustar00zuulzuul00000000000000Rationale ========= Introduction ------------ Apache Hadoop is an industry standard and widely adopted MapReduce implementation; it is one among a growing number of data processing frameworks. The aim of this project is to enable users to easily provision and manage clusters with Hadoop and other data processing frameworks on OpenStack. It is worth mentioning that Amazon has provided Hadoop for several years as the Amazon Elastic MapReduce (EMR) service. Sahara aims to provide users with a simple means to provision Hadoop, Spark, and Storm clusters by specifying several parameters such as the framework version, cluster topology, hardware node details and more. After a user fills in all the parameters, sahara deploys the cluster in a few minutes. Sahara also provides the means to scale an already provisioned cluster by adding or removing worker nodes on demand. The solution will address the following use cases: * fast provisioning of data processing clusters on OpenStack for development and quality assurance (QA). * utilization of unused compute power from a general purpose OpenStack IaaS cloud. * "Analytics as a Service" for ad-hoc or bursty analytic workloads (similar to AWS EMR). Key features are: * designed as an OpenStack component. * managed through a REST API with a user interface (UI) available as part of the OpenStack Dashboard. * support for a variety of data processing frameworks: * multiple Hadoop vendor distributions. * Apache Spark and Storm. * pluggable system of Hadoop installation engines. * integration with vendor specific management tools, such as Apache Ambari and Cloudera Management Console. * predefined configuration templates with the ability to modify parameters. Details ------- The sahara product communicates with the following OpenStack services: * Dashboard (horizon) - provides a GUI with the ability to use all of sahara's features. * Identity (keystone) - authenticates users and provides security tokens that are used to work with OpenStack, limiting a user's abilities in sahara to their OpenStack privileges. * Compute (nova) - used to provision VMs for data processing clusters. * Bare metal (ironic) - used to provision bare metal nodes for data processing clusters. * Orchestration (heat) - used to provision and orchestrate the deployment of data processing clusters.
* Image (glance) - stores VM images, each image containing an operating system and a pre-installed data processing distribution or framework. * Object Storage (swift) - can be used as storage for job binaries and data that will be processed or created by framework jobs. * Block Storage (cinder) - can be used to provision block storage for VM instances. * Networking (neutron) - provides networking services to data processing clusters. * DNS service (designate) - provides ability to communicate with cluster instances and Hadoop services by their hostnames. * Telemetry (ceilometer) - used to collect measures of cluster usage for metering and monitoring purposes. * Shared file systems (manila) - can be used for storage of framework job binaries and data that will be processed or created by jobs. * Key manager (barbican & castellan) - persists the authentication data like passwords and private keys in a secure storage. .. image:: ../images/openstack-interop.png :width: 960 :height: 720 :scale: 83 % :align: left General Workflow ---------------- Sahara will provide two levels of abstraction for the API and UI based on the addressed use cases: cluster provisioning and analytics as a service. For fast cluster provisioning a generic workflow will be as following: * select a Hadoop (or framework) version. * select a base image with or without pre-installed data processing framework: * for base images without a pre-installed framework, sahara will support pluggable deployment engines that integrate with vendor tooling. * define cluster configuration, including cluster size, topology, and framework parameters (for example, heap size): * to ease the configuration of such parameters, configurable templates are provided. * provision the cluster; sahara will provision nodes (VMs or baremetal), install and configure the data processing framework. * perform operations on the cluster; add or remove nodes. * terminate the cluster when it is no longer needed. For analytics as a service, a generic workflow will be as following: * select one of the predefined data processing framework versions. * configure a job: * choose the type of job: pig, hive, jar-file, etc. * provide the job script source or jar location. * select input and output data location. * set the limit for the cluster size. * execute the job: * all cluster provisioning and job execution will happen transparently to the user. * if using a transient cluster, it will be removed automatically after job completion. * get the results of computations (for example, from swift). User's Perspective ------------------ While provisioning clusters through sahara, the user operates on three types of entities: Node Group Templates, Cluster Templates and Clusters. A Node Group Template describes a group of nodes within cluster. It contains a list of processes that will be launched on each instance in a group. Also a Node Group Template may provide node scoped configurations for those processes. This kind of template encapsulates hardware parameters (flavor) for the node instance and configuration for data processing framework processes running on the node. A Cluster Template is designed to bring Node Group Templates together to form a Cluster. A Cluster Template defines what Node Groups will be included and how many instances will be created for each. Some data processing framework configurations can not be applied to a single node, but to a whole Cluster. A user can specify these kinds of configurations in a Cluster Template. 
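As a purely illustrative sketch (not an exact API payload; the field names follow the Node Group and Cluster objects described in the Programming Reference section of this documentation, and the plugin/version values are example choices), the relationship between the two template types can be pictured as:

.. sourcecode:: python

    # Illustrative only: a node group template captures per-node settings,
    # while a cluster template combines node groups and cluster-wide configs.
    worker_node_group = {
        "name": "worker",
        "flavor_id": "2",                              # hardware for each instance
        "node_processes": ["datanode", "tasktracker"],  # processes run on each node
        "count": 3,                                    # instances in this group
    }

    cluster_template = {
        "name": "small-vanilla-cluster",
        "plugin_name": "vanilla",
        "hadoop_version": "2.7.1",
        "node_groups": [worker_node_group],
        "cluster_configs": {},                         # cluster-scoped configuration
    }

..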
Sahara enables users to specify which processes should be added to an anti-affinity group within a Cluster Template. If a process is included into an anti-affinity group, it means that instances where this process is going to be launched should be scheduled to different hardware hosts. The Cluster entity represents a collection of instances that all have the same data processing framework installed. It is mainly characterized by an image with a pre-installed framework which will be used for cluster deployment. Users may choose one of the pre-configured Cluster Templates to start a Cluster. To get access to instances after a Cluster has started, the user should specify a keypair. Sahara provides several constraints on cluster framework topology. You can see all constraints in the documentation for the appropriate plugin. Each Cluster belongs to an Identity service project determined by the user. Users have access only to objects located in projects they have access to. Users can edit and delete only objects they have created or exist in their projects. Naturally, admin users have full access to every object. In this manner, sahara complies with general OpenStack access policy. Integration with Object Storage ------------------------------- The swift project provides the standard Object Storage service for OpenStack environments; it is an analog of the Amazon S3 service. As a rule it is deployed on bare metal machines. It is natural to expect data processing on OpenStack to access data stored there. Sahara provides this option with a file system implementation for swift `HADOOP-8545 `_ and `Change I6b1ba25b `_ which implements the ability to list endpoints for an object, account or container. This makes it possible to integrate swift with software that relies on data locality information to avoid network overhead. To get more information on how to enable swift support see :doc:`../user/hadoop-swift`. Pluggable Deployment and Monitoring ----------------------------------- In addition to the monitoring capabilities provided by vendor-specific Hadoop management tooling, sahara provides pluggable integration with external monitoring systems such as Nagios or Zabbix. Both deployment and monitoring tools can be installed on standalone VMs, thus allowing a single instance to manage and monitor several clusters at once. ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.653891 sahara-16.0.0/doc/source/reference/0000775000175000017500000000000000000000000017063 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/reference/edp-spi.rst0000664000175000017500000001501600000000000021161 0ustar00zuulzuul00000000000000Elastic Data Processing (EDP) SPI ================================= The EDP job engine objects provide methods for creating, monitoring, and terminating jobs on Sahara clusters. Provisioning plugins that support EDP must return an EDP job engine object from the :ref:`get_edp_engine` method described in :doc:`plugin-spi`. Sahara provides subclasses of the base job engine interface that support EDP on clusters running Oozie, Spark, and/or Storm. These are described below. .. _edp_spi_job_types: Job Types --------- Some of the methods below test job type. Sahara supports the following string values for job types: * Hive * Java * Pig * MapReduce * MapReduce.Streaming * Spark * Shell * Storm .. 
note:: Constants for job types are defined in *sahara.utils.edp*. Job Status Values ----------------- Several of the methods below return a job status value. A job status value is a dictionary of the form: {'status': *job_status_value*} where *job_status_value* is one of the following string values: * DONEWITHERROR * FAILED * TOBEKILLED * KILLED * PENDING * RUNNING * SUCCEEDED Note, constants for job status are defined in *sahara.utils.edp* EDP Job Engine Interface ------------------------ The sahara.service.edp.base_engine.JobEngine class is an abstract class with the following interface: cancel_job(job_execution) ~~~~~~~~~~~~~~~~~~~~~~~~~ Stops the running job whose id is stored in the job_execution object. *Returns*: None if the operation was unsuccessful or an updated job status value. get_job_status(job_execution) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Returns the current status of the job whose id is stored in the job_execution object. *Returns*: a job status value. run_job(job_execution) ~~~~~~~~~~~~~~~~~~~~~~ Starts the job described by the job_execution object *Returns*: a tuple of the form (job_id, job_status_value, job_extra_info). * *job_id* is required and must be a string that allows the EDP engine to uniquely identify the job. * *job_status_value* may be None or a job status value * *job_extra_info* may be None or optionally a dictionary that the EDP engine uses to store extra information on the job_execution_object. validate_job_execution(cluster, job, data) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Checks whether or not the job can run on the cluster with the specified data. Data contains values passed to the */jobs//execute* REST API method during job launch. If the job cannot run for any reason, including job configuration, cluster configuration, or invalid data, this method should raise an exception. *Returns*: None get_possible_job_config(job_type) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Returns hints used by the Sahara UI to prompt users for values when configuring and launching a job. Note that no hints are required. See :doc:`../user/edp` for more information on how configuration values, parameters, and arguments are used by different job types. *Returns*: a dictionary of the following form, containing hints for configs, parameters, and arguments for the job type: {'job_config': {'configs': [], 'params': {}, 'args': []}} * *args* is a list of strings * *params* contains simple key/value pairs * each item in *configs* is a dictionary with entries for 'name' (required), 'value', and 'description' get_supported_job_types() ~~~~~~~~~~~~~~~~~~~~~~~~~ This method returns the job types that the engine supports. Not all engines will support all job types. *Returns*: a list of job types supported by the engine. Oozie Job Engine Interface -------------------------- The sahara.service.edp.oozie.engine.OozieJobEngine class is derived from JobEngine. It provides implementations for all of the methods in the base interface but adds a few more abstract methods. Note that the *validate_job_execution(cluster, job, data)* method does basic checks on the job configuration but probably should be overloaded to include additional checks on the cluster configuration. For example, the job engines for plugins that support Oozie add checks to make sure that the Oozie service is up and running. get_hdfs_user() ~~~~~~~~~~~~~~~ Oozie uses HDFS to distribute job files. This method gives the name of the account that is used on the data nodes to access HDFS (such as 'hadoop' or 'hdfs'). 
The Oozie job engine expects that HDFS contains a directory for this user under */user/*. *Returns*: a string giving the username for the account used to access HDFS on the cluster. create_hdfs_dir(remote, dir_name) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The remote object *remote* references a node in the cluster. This method creates the HDFS directory *dir_name* under the user specified by *get_hdfs_user()* in the HDFS accessible from the specified node. For example, if the HDFS user is 'hadoop' and the dir_name is 'test' this method would create '/user/hadoop/test'. The reason that this method is broken out in the interface as an abstract method is that different versions of Hadoop treat path creation differently. *Returns*: None get_oozie_server_uri(cluster) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Returns the full URI for the Oozie server, for example *http://my_oozie_host:11000/oozie*. This URI is used by an Oozie client to send commands and queries to the Oozie server. *Returns*: a string giving the Oozie server URI. get_oozie_server(self, cluster) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Returns the node instance for the host in the cluster running the Oozie server. *Returns*: a node instance. get_name_node_uri(self, cluster) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Returns the full URI for the Hadoop NameNode, for example *http://master_node:8020*. *Returns*: a string giving the NameNode URI. get_resource_manager_uri(self, cluster) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Returns the full URI for the Hadoop JobTracker for Hadoop version 1 or the Hadoop ResourceManager for Hadoop version 2. *Returns*: a string giving the JobTracker or ResourceManager URI. Spark Job Engine ---------------- The sahara.service.edp.spark.engine.SparkJobEngine class provides a full EDP implementation for Spark standalone clusters. .. note:: The *validate_job_execution(cluster, job, data)* method does basic checks on the job configuration but probably should be overloaded to include additional checks on the cluster configuration. For example, the job engine returned by the Spark plugin checks that the Spark version is >= 1.0.0 to ensure that *spark-submit* is available. get_driver_classpath(self) ~~~~~~~~~~~~~~~~~~~~~~~~~~ Returns driver class path. *Returns*: a string of the following format ' --driver-class-path *class_path_value*'. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/reference/index.rst0000664000175000017500000000034400000000000020725 0ustar00zuulzuul00000000000000===================== Programming Reference ===================== Plugins and EDP =============== .. toctree:: :maxdepth: 2 plugins plugin-spi edp-spi REST API ======== .. toctree:: :maxdepth: 2 restapi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/reference/plugin-spi.rst0000664000175000017500000004434700000000000021720 0ustar00zuulzuul00000000000000Plugin SPI ========== Plugin interface ---------------- get_versions() ~~~~~~~~~~~~~~ Returns all available versions of the plugin. Depending on the plugin, this version may map directly to the HDFS version, or it may not; check your plugin's documentation. It is responsibility of the plugin to make sure that all required images for each hadoop version are available, as well as configs and whatever else that plugin needs to create the Hadoop cluster. 
*Returns*: list of strings representing plugin versions *Example return value*: ["1.2.1", "2.3.0", "2.4.1"] get_configs( hadoop_version ) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Lists all configs supported by the plugin with descriptions, defaults, and targets for which this config is applicable. *Returns*: list of configs *Example return value*: (("JobTracker heap size", "JobTracker heap size, in MB", "int", "512", `"mapreduce"`, "node", True, 1)) get_node_processes( hadoop_version ) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Returns all supported services and node processes for a given Hadoop version. Each node process belongs to a single service and that relationship is reflected in the returned dict object. See example for details. *Returns*: dictionary having entries (service -> list of processes) *Example return value*: {"mapreduce": ["tasktracker", "jobtracker"], "hdfs": ["datanode", "namenode"]} get_required_image_tags( hadoop_version ) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Lists tags that should be added to OpenStack Image via Image Registry. Tags are used to filter Images by plugin and hadoop version. *Returns*: list of tags *Example return value*: ["tag1", "some_other_tag", ...] validate( cluster ) ~~~~~~~~~~~~~~~~~~~ Validates a given cluster object. Raises a *SaharaException* with a meaningful message in the case of validation failure. *Returns*: None *Example exception*: validate_scaling( cluster, existing, additional ) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ To be improved. Validates a given cluster before scaling operation. *Returns*: list of validation_errors update_infra( cluster ) ~~~~~~~~~~~~~~~~~~~~~~~ This method is no longer used now that Sahara utilizes Heat for OpenStack resource provisioning, and is not currently utilized by any plugin. *Returns*: None configure_cluster( cluster ) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Configures cluster on the VMs provisioned by sahara. In this function the plugin should perform all actions like adjusting OS, installing required packages (including Hadoop, if needed), configuring Hadoop, etc. *Returns*: None start_cluster( cluster ) ~~~~~~~~~~~~~~~~~~~~~~~~ Start already configured cluster. This method is guaranteed to be called only on a cluster which was already prepared with configure_cluster(...) call. *Returns*: None scale_cluster( cluster, instances ) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Scale an existing cluster with additional instances. The instances argument is a list of ready-to-configure instances. Plugin should do all configuration operations in this method and start all services on those instances. *Returns*: None .. _get_edp_engine: get_edp_engine( cluster, job_type ) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Returns an EDP job engine object that supports the specified job_type on the given cluster, or None if there is no support. The EDP job engine object returned must implement the interface described in :doc:`edp-spi`. The job_type is a String matching one of the job types listed in :ref:`edp_spi_job_types`. *Returns*: an EDP job engine object or None decommission_nodes( cluster, instances ) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Scale cluster down by removing a list of instances. The plugin should stop services on the provided list of instances. The plugin also may need to update some configurations on other instances when nodes are removed; if so, this method must perform that reconfiguration. 
*Returns*: None on_terminate_cluster( cluster ) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When user terminates cluster, sahara simply shuts down all the cluster VMs. This method is guaranteed to be invoked before that, allowing the plugin to do some clean-up. *Returns*: None get_open_ports( node_group ) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When user requests sahara to automatically create a security group for the node group (``auto_security_group`` property set to True), sahara will call this plugin method to get a list of ports that need to be opened. *Returns*: list of ports to be open in auto security group for the given node group get_edp_job_types( versions ) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Optional method, which provides the ability to see all supported job types for specified plugin versions. *Returns*: dict with supported job types for specified versions of plugin recommend_configs( self, cluster, scaling=False ) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Optional method, which provides recommendations for cluster configuration before creating/scaling operation. get_image_arguments( self, hadoop_version ): ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Optional method, which gets the argument set taken by the plugin's image generator, or NotImplemented if the plugin does not provide image generation support. See :doc:`../contributor/image-gen`. *Returns*: A sequence with items of type sahara.plugins.images.ImageArgument. pack_image( self, hadoop_version, remote, test_only=False, ... ): ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Optional method which packs an image for registration in Glance and use by Sahara. This method is called from the image generation CLI rather than from the Sahara api or engine service. See :doc:`../contributor/image-gen`. *Returns*: None (modifies the image pointed to by the remote in-place.) validate_images( self, cluster, test_only=False, image_arguments=None ): ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Validates the image to be used to create a cluster, to ensure that it meets the specifications of the plugin. See :doc:`../contributor/image-gen`. *Returns*: None; may raise a sahara.plugins.exceptions.ImageValidationError Object Model ============ Here is a description of all the objects involved in the API. Notes: - clusters and node_groups have 'extra' fields allowing the plugin to persist any supplementary info about the cluster. - node_process is just a process that runs on some node in cluster. Example list of node processes: 1. jobtracker 2. namenode 3. tasktracker 4. datanode - Each plugin may have different names for the same processes. Config ------ An object, describing one configuration entry +-------------------+--------+------------------------------------------------+ | Property | Type | Description | +===================+========+================================================+ | name | string | Config name. | +-------------------+--------+------------------------------------------------+ | description | string | A hint for user, what this config is used for. | +-------------------+--------+------------------------------------------------+ | config_type | enum | possible values are: 'string', 'integer', | | | | 'boolean', 'enum'. | +-------------------+--------+------------------------------------------------+ | config_values | list | List of possible values, if config_type is | | | | enum. 
| +-------------------+--------+------------------------------------------------+ | default_value | string | Default value for config. | +-------------------+--------+------------------------------------------------+ | applicable_target | string | The target could be either a service returned | | | | by get_node_processes(...) call | | | | in form of 'service:', or | | | | 'general'. | +-------------------+--------+------------------------------------------------+ | scope | enum | Could be either 'node' or 'cluster'. | +-------------------+--------+------------------------------------------------+ | is_optional | bool | If is_optional is False and no default_value | | | | is specified, user must provide a value. | +-------------------+--------+------------------------------------------------+ | priority | int | 1 or 2. A Hint for UI. Configs with priority | | | | *1* are always displayed. | | | | Priority *2* means user should click a button | | | | to see the config. | +-------------------+--------+------------------------------------------------+ User Input ---------- Value provided by user for a specific config. +----------+--------+--------------------------------------------------------+ | Property | Type | Description | +==========+========+========================================================+ | config | config | A config object for which this user_input is provided. | +----------+--------+--------------------------------------------------------+ | value | ... | Value for the config. Type depends on Config type. | +----------+--------+--------------------------------------------------------+ Instance -------- An instance created for cluster. +---------------+---------+---------------------------------------------------+ | Property | Type | Description | +===============+=========+===================================================+ | instance_id | string | Unique instance identifier. | +---------------+---------+---------------------------------------------------+ | instance_name | string | OpenStack instance name. | +---------------+---------+---------------------------------------------------+ | internal_ip | string | IP to communicate with other instances. | +---------------+---------+---------------------------------------------------+ | management_ip | string | IP of instance, accessible outside of internal | | | | network. | +---------------+---------+---------------------------------------------------+ | volumes | list | List of volumes attached to instance. Empty if | | | | ephemeral drive is used. | +---------------+---------+---------------------------------------------------+ | nova_info | object | Nova instance object. | +---------------+---------+---------------------------------------------------+ | username | string | Username, that sahara uses for establishing | | | | remote connections to instance. | +---------------+---------+---------------------------------------------------+ | hostname | string | Same as instance_name. | +---------------+---------+---------------------------------------------------+ | fqdn | string | Fully qualified domain name for this instance. | +---------------+---------+---------------------------------------------------+ | remote | helpers | Object with helpers for performing remote | | | | operations. | +---------------+---------+---------------------------------------------------+ Node Group ---------- Group of instances. 
+----------------------+--------+---------------------------------------------+ | Property | Type | Description | +======================+========+=============================================+ | name | string | Name of this Node Group in Cluster. | +----------------------+--------+---------------------------------------------+ | flavor_id | string | OpenStack Flavor used to boot instances. | +----------------------+--------+---------------------------------------------+ | image_id | string | Image id used to boot instances. | +----------------------+--------+---------------------------------------------+ | node_processes | list | List of processes running on each instance. | +----------------------+--------+---------------------------------------------+ | node_configs | dict | Configs dictionary, applied to instances. | +----------------------+--------+---------------------------------------------+ | volumes_per_node | int | Number of volumes mounted to each instance. | | | | 0 means use ephemeral drive. | +----------------------+--------+---------------------------------------------+ | volumes_size | int | Size of each volume (GB). | +----------------------+--------+---------------------------------------------+ | volumes_mount_prefix | string | Prefix added to mount path of each volume. | +----------------------+--------+---------------------------------------------+ | floating_ip_pool | string | Floating IP Pool name. All instances in the | | | | Node Group will have Floating IPs assigned | | | | from this pool. | +----------------------+--------+---------------------------------------------+ | count | int | Number of instances in this Node Group. | +----------------------+--------+---------------------------------------------+ | username | string | Username used by sahara to establish remote | | | | connections to instances. | +----------------------+--------+---------------------------------------------+ | configuration | dict | Merged dictionary of node configurations | | | | and cluster configurations. | +----------------------+--------+---------------------------------------------+ | storage_paths | list | List of directories where storage should be | | | | placed. | +----------------------+--------+---------------------------------------------+ Cluster ------- Contains all relevant info about cluster. This object is provided to the plugin for both cluster creation and scaling. The "Cluster Lifecycle" section below further specifies which fields are filled at which moment. +----------------------------+--------+---------------------------------------+ | Property | Type | Description | +============================+========+=======================================+ | name | string | Cluster name. | +----------------------------+--------+---------------------------------------+ | project_id | string | OpenStack Project id where this | | | | Cluster is available. | +----------------------------+--------+---------------------------------------+ | plugin_name | string | Plugin name. | +----------------------------+--------+---------------------------------------+ | hadoop_version | string | Hadoop version running on instances. | +----------------------------+--------+---------------------------------------+ | default_image_id | string | OpenStack image used to boot | | | | instances. | +----------------------------+--------+---------------------------------------+ | node_groups | list | List of Node Groups. 
| +----------------------------+--------+---------------------------------------+ | cluster_configs | dict | Dictionary of Cluster scoped | | | | configurations. | +----------------------------+--------+---------------------------------------+ | cluster_template_id | string | Cluster Template used for Node Groups | | | | and Configurations. | +----------------------------+--------+---------------------------------------+ | user_keypair_id | string | OpenStack keypair added to instances | | | | to make them accessible for user. | +----------------------------+--------+---------------------------------------+ | neutron_management_network | string | Neutron network ID. Instances will | | | | get fixed IPs in this network. | +----------------------------+--------+---------------------------------------+ | anti_affinity | list | List of processes that will be run on | | | | different hosts. | +----------------------------+--------+---------------------------------------+ | description | string | Cluster Description. | +----------------------------+--------+---------------------------------------+ | info | dict | Dictionary for additional information.| +----------------------------+--------+---------------------------------------+ Validation Error ---------------- Describes what is wrong with one of the values provided by user. +---------------+--------+-----------------------------------------------+ | Property | Type | Description | +===============+========+===============================================+ | config | config | A config object that is not valid. | +---------------+--------+-----------------------------------------------+ | error_message | string | Message that describes what exactly is wrong. | +---------------+--------+-----------------------------------------------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/reference/plugins.rst0000664000175000017500000000227300000000000021302 0ustar00zuulzuul00000000000000Pluggable Provisioning Mechanism ================================ Sahara can be integrated with 3rd party management tools like Apache Ambari and Cloudera Management Console. The integration is achieved using the plugin mechanism. In short, responsibilities are divided between the Sahara core and a plugin as follows. Sahara interacts with the user and uses Heat to provision OpenStack resources (VMs, baremetal servers, security groups, etc.) The plugin installs and configures a Hadoop cluster on the provisioned instances. Optionally, a plugin can deploy management and monitoring tools for the cluster. Sahara provides plugins with utility methods to work with provisioned instances. A plugin must extend the `sahara.plugins.provisioning:ProvisioningPluginBase` class and implement all the required methods. Read :doc:`plugin-spi` for details. The `instance` objects provided by Sahara have a `remote` property which can be used to interact with instances. The `remote` is a context manager so you can use it in `with instance.remote:` statements. The list of available commands can be found in `sahara.utils.remote.InstanceInteropHelper`. See the source code of the Vanilla plugin for usage examples. 
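As a rough sketch of that pattern (for illustration only; the helper names ``execute_command`` and ``write_file_to`` are taken from `sahara.utils.remote.InstanceInteropHelper`, which should be consulted for the authoritative list), a plugin might interact with a provisioned instance like this:

.. sourcecode:: python

    # Sketch only: how a plugin method typically uses the remote helper
    # of a provisioned instance to configure and start a service.
    def start_service_on_instance(instance):
        with instance.remote() as r:
            # push a configuration file to the node
            r.write_file_to('/tmp/service.conf', 'key=value\n')
            # run a command on the node
            r.execute_command('sudo cp /tmp/service.conf /etc/service.conf')

..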
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/reference/restapi.rst0000664000175000017500000000757400000000000021301 0ustar00zuulzuul00000000000000Sahara REST API v1.1 ******************** 1 General API information ========================= This section contains base info about the sahara REST API design. 1.1 Authentication and Authorization ------------------------------------ The sahara API uses the OpenStack Identity service as the default authentication service. When the Identity service is enabled, users who submit requests to the sahara service must provide an authentication token in the ``X-Auth-Token`` request header. A user can obtain the token by authenticating to the Identity service endpoint. For more information about the Identity service, please see the :keystone-doc:`keystone project developer documentation <>`. With each request, a user must specify the keystone project in the url path, for example: '/v1.1/{project_id}/clusters'. Sahara will perform the requested operation in the specified project using the provided credentials. Therefore, clusters may be created and managed only within projects to which the user has access. 1.2 Request / Response Types ---------------------------- The sahara API supports the JSON data serialization format. This means that for requests that contain a body, the ``Content-Type`` header must be set to the MIME type value ``application/json``. Also, clients should accept JSON serialized responses by specifying the ``Accept`` header with the MIME type value ``application/json`` or adding the ``.json`` extension to the resource name. The default response format is ``application/json`` if the client does not specify an ``Accept`` header or append the ``.json`` extension in the URL path. Example: .. sourcecode:: text GET /v1.1/{project_id}/clusters.json or .. sourcecode:: text GET /v1.1/{project_id}/clusters Accept: application/json 1.3 Navigation by response -------------------------- Sahara API supports delivering response data by pages. User can pass two parameters in API GET requests which return an array of objects. The parameters are: ``limit`` - maximum number of objects in response data. This parameter must be a positive integer number. ``marker`` - ID of the last element on the list which won't be in response. Example: Get 15 clusters after cluster with id=d62ad147-5c10-418c-a21a-3a6597044f29: .. sourcecode:: text GET /v1.1/{project_id}/clusters?limit=15&marker=d62ad147-5c10-418c-a21a-3a6597044f29 For convenience, response contains markers of previous and following pages which are named 'prev' and 'next' fields. Also there is ``sort_by`` parameter for sorting objects. Sahara API supports ascending and descending sorting. Examples: Sort clusters by name: .. sourcecode:: text GET /v1.1/{project_id}/clusters?sort_by=name Sort clusters by date of creation in descending order: .. sourcecode:: text GET /v1.1/{project_id}/clusters?sort_by=-created_at 1.4 Faults ---------- The sahara API returns an error response if a failure occurs while processing a request. Sahara uses only standard HTTP error codes. 4xx errors indicate problems in the particular request being sent from the client and 5xx errors indicate server-side problems. The response body will contain richer information about the cause of the error. An error response follows the format illustrated by the following example: .. 
sourcecode:: http HTTP/1.1 400 BAD REQUEST Content-type: application/json Content-length: 126 { "error_name": "CLUSTER_NAME_ALREADY_EXISTS", "error_message": "Cluster with name 'test-cluster' already exists", "error_code": 400 } The ``error_code`` attribute is an HTTP response code. The ``error_name`` attribute indicates the generic error type without any concrete ids or names, etc. The last attribute, ``error_message``, contains a human readable error description. 2 API ===== - `Sahara REST API Reference (OpenStack API Complete Reference - DataProcessing) `_ ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.653891 sahara-16.0.0/doc/source/user/0000775000175000017500000000000000000000000016103 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.657891 sahara-16.0.0/doc/source/user/building-guest-images/0000775000175000017500000000000000000000000022270 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/user/building-guest-images/baremetal.rst0000664000175000017500000000055600000000000024764 0ustar00zuulzuul00000000000000.. _building-baremetal-images-label: Bare metal images ----------------- Images that can be used for bare metal deployment through Ironic can be generated using both image building tools: sahara-image-create: pass the -b parameters to the command sahara-image-pack: use `virt-get-kernel` on the generated image to extract the kernel and the initramfs file ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/user/building-guest-images/sahara-image-create.rst0000664000175000017500000000546400000000000026613 0ustar00zuulzuul00000000000000sahara-image-create ------------------- The historical tool for building images, ``sahara-image-create``, is based on `Disk Image Builder `_. `Disk Image Builder` builds disk images using elements. An element is a particular set of code that alters how the image is built, or runs within the chroot to prepare the image. The additional elements required by Sahara images and the ``sahara-image-create`` command itself are stored in the `Sahara image elements repository `_ To create images for a specific plugin follow these steps: 1. Clone repository "https://opendev.org/openstack/sahara-image-elements" locally. 2. Use tox to build images. You can run the command below in sahara-image-elements directory to build images. By default this script will attempt to create cloud images for all versions of supported plugins and all operating systems (subset of Ubuntu, Fedora, and CentOS depending on plugin). .. sourcecode:: tox -e venv -- sahara-image-create -u If you want to build a image for ```` with ```` on a specific ```` just execute: .. sourcecode:: tox -e venv -- sahara-image-create -p -v -i Tox will create a virtualenv and install required python packages in it, clone the repositories "https://opendev.org/openstack/diskimage-builder" and "https://opendev.org/openstack/sahara-image-elements" and export necessary parameters. The valid values for the ```` argument are: - Ubuntu (all versions): ``ubuntu`` - CentOS 7: ``centos7`` - Fedora: ``fedora`` ``sahara-image-create`` will then create the required cloud images using image elements that install all the necessary packages and configure them. 
You will find created images in the parent directory. Variables ~~~~~~~~~ The following environment variables can be used to change the behavior of the image building: * ``JAVA_DOWNLOAD_URL`` - download link for JDK (tarball or bin) * ``DIB_IMAGE_SIZE`` - parameter that specifies the size of the instance's hard disk volume. You need to specify it only for Fedora, because Fedora doesn't use all of the available volume The following variables can be used to change the name of the output image: * ``centos7_image_name`` * ``ubuntu_image_name`` * ``fedora_image_name`` .. note:: Disk Image Builder will generate QCOW2 images, used with the default OpenStack Qemu/KVM hypervisors. If your OpenStack uses a different hypervisor, the generated image should be converted to an appropriate format. For finer control of ``sahara-image-create`` see the `official documentation `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/user/building-guest-images/sahara-image-pack.rst0000664000175000017500000000766100000000000026263 0ustar00zuulzuul00000000000000.. _sahara-image-pack-label: sahara-image-pack ----------------- The CLI command ``sahara-image-pack`` operates in-place on an existing image and installs and configures the software required for the plugin. The script ``sahara-image-pack`` takes the following primary arguments: :: --config-file PATH Path to a config file to use. Multiple config files can be specified, with values in later files taking precedence. Defaults to None. --image IMAGE The path to an image to modify. This image will be modified in-place: be sure to target a copy if you wish to maintain a clean master image. --root-filesystem ROOT_FS The filesystem to mount as the root volume on the image. No value is required if only one filesystem is detected. --test-only If this flag is set, no changes will be made to the image; instead, the script will fail if discrepancies are found between the image and the intended state. After these arguments, the script takes ``PLUGIN`` and ``VERSION`` arguments. These arguments will allow any plugin and version combination which supports the image packing feature. Plugins may require their own arguments at specific versions; use the ``--help`` feature with ``PLUGIN`` and ``VERSION`` to see the appropriate argument structure. A plausible command-line invocation would be: :: sahara-image-pack --image CentOS.qcow2 \ --config-file etc/sahara/sahara.conf \ cdh 5.7.0 [cdh 5.7.0 specific arguments, if any] This script will modify the target image in-place. Please copy your image if you want a backup or if you wish to create multiple images from a single base image. This CLI will automatically populate the set of available plugins and versions from the plugin set loaded in Sahara, and will show any plugin for which the image packing feature is available. The next sections of this guide will first describe how to modify an image packing specification for one of the plugins, and second, how to enable the image packing feature for new or existing plugins. Note: In the case of RHEL 7 images, it is necessary to register the image before starting to pack it, and also to enable some required repos.
:: virt-customize -v -a $SAHARA_RHEL_IMAGE --sm-register \ --sm-credentials ${REG_USER}:password:${REG_PASSWORD} --sm-attach \ pool:${REG_POOL_ID} --run-command 'subscription-manager repos \ --disable=* --enable=$REPO_A \ --enable=$REPO_B \ --enable=$REPO_C' Installation and developer notes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The script is part of the Sahara repository, but it does not depend on the Sahara services. In order to use its development version, clone the `Sahara repository `_, check out the branch which matches the Sahara version used, and install the repository in a virtualenv. The script is also provided by binary distributions of OpenStack. For example, RDO ships it in the ``openstack-sahara-image-pack`` package. The script depends on a python library which is not packaged in pip, but is available through yum, dnf, and apt. If you have installed Sahara through yum, dnf, or apt, you should have appropriate dependencies, but if you wish to use the script while working with Sahara from source, run whichever of the following is appropriate to your OS: :: sudo yum install libguestfs python3-libguestfs libguestfs-tools sudo dnf install libguestfs python3-libguestfs libguestfs-tools sudo apt-get install libguestfs python3-guestfs libguestfs-tools If you are using tox to create virtual environments for your Sahara work, please use the ``images`` environment to run sahara-image-pack. This environment is configured to use system site packages, and will thus be able to find its dependency on python-libguestfs. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/user/building-guest-images.rst0000664000175000017500000000321100000000000023021 0ustar00zuulzuul00000000000000.. _building-guest-images-label: Building guest images ===================== Sahara plugins represent different Hadoop or other Big Data platforms and require specific guest images. While it is possible to use cloud images which only contain the basic software requirements (also called *plain images*), their usage slows down the cluster provisioning process and has not been thoroughly tested recently. It is strongly advised to build images which contain the software required to create the clusters for the various plugins and use them instead of *plain images*. Sahara currently provides two different tools for building guest images: - ``sahara-image-pack`` is newer and supports more recent images; - ``sahara-image-create`` is the older tool. Both tools are described in detail in the next sections. The documentation of each plugin describes which method is supported for the various versions. If both are supported, ``sahara-image-pack`` is recommended. General requirements for guest images ------------------------------------- There are a few common requirements for all guest images, which must be based on GNU/Linux distributions. * cloud-init must be installed * the ssh server must be installed * the firewall, if enabled, must allow connections on port 22 (ssh) The cloud images provided by the GNU/Linux distributions meet those requirements. Each plugin specifies additional requirements. The image building tools provided by Sahara take care of preparing the images with those additional requirements. ..
toctree:: building-guest-images/sahara-image-pack building-guest-images/sahara-image-create building-guest-images/baremetal ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/user/dashboard-user-guide.rst0000664000175000017500000004625700000000000022651 0ustar00zuulzuul00000000000000Sahara (Data Processing) UI User Guide ====================================== This guide assumes that you already have the sahara service and Horizon dashboard up and running. Don't forget to make sure that sahara is registered in Keystone. If you require assistance with that, please see the `installation guide <../install/installation-guide.html>`_. The sections below give a panel by panel overview of setting up clusters and running jobs. For a description of using the guided cluster and job tools, look at `Launching a cluster via the Cluster Creation Guide`_ and `Running a job via the Job Execution Guide`_. Launching a cluster via the sahara UI ------------------------------------- Registering an Image -------------------- 1) Navigate to the "Project" dashboard, then to the "Data Processing" tab, then click on the "Clusters" panel and finally the "Image Registry" tab. 2) From that page, click on the "Register Image" button at the top right 3) Choose the image that you'd like to register with sahara 4) Enter the username of the cloud-init user on the image 5) Choose plugin and version to make the image available only for the intended clusters 6) Click the "Done" button to finish the registration Create Node Group Templates --------------------------- 1) Navigate to the "Project" dashboard, then to the "Data Processing" tab, then click on the "Clusters" panel and then the "Node Group Templates" tab. 2) From that page, click on the "Create Template" button at the top right 3) Choose your desired Plugin name and Version from the dropdowns and click "Next" 4) Give your Node Group Template a name (description is optional) 5) Choose a flavor for this template (based on your CPU/memory/disk needs) 6) Choose the storage location for your instance, this can be either "Ephemeral Drive" or "Cinder Volume". If you choose "Cinder Volume", you will need to add additional configuration 7) Switch to the Node processes tab and choose which processes should be run for all instances that are spawned from this Node Group Template 8) Click on the "Create" button to finish creating your Node Group Template Create a Cluster Template ------------------------- 1) Navigate to the "Project" dashboard, then to the "Data Processing" tab, then click on the "Clusters" panel and finally the "Cluster Templates" tab. 
2) From that page, click on the "Create Template" button at the top right 3) Choose your desired Plugin name and Version from the dropdowns and click "Next" 4) Under the "Details" tab, you must give your template a name 5) Under the "Node Groups" tab, you should add one or more nodes that can be based on one or more templates - To do this, start by choosing a Node Group Template from the dropdown and click the "+" button - You can adjust the number of nodes to be spawned for this node group via the text box or the "-" and "+" buttons - Repeat these steps if you need nodes from additional node group templates 6) Optionally, you can adjust your configuration further by using the "General Parameters", "HDFS Parameters" and "MapReduce Parameters" tabs 7) If you have Designate DNS service you can choose the domain name in "DNS" tab for internal and external hostname resolution 8) Click on the "Create" button to finish creating your Cluster Template Launching a Cluster ------------------- 1) Navigate to the "Project" dashboard, then to the "Data Processing" tab, then click on the "Clusters" panel and lastly, click on the "Clusters" tab. 2) Click on the "Launch Cluster" button at the top right 3) Choose your desired Plugin name and Version from the dropdowns and click "Next" 4) Give your cluster a name (required) 5) Choose which cluster template should be used for your cluster 6) Choose the image that should be used for your cluster (if you do not see any options here, see `Registering an Image`_ above) 7) Optionally choose a keypair that can be used to authenticate to your cluster instances 8) Click on the "Create" button to start your cluster - Your cluster's status will display on the Clusters table - It will likely take several minutes to reach the "Active" state Scaling a Cluster ----------------- 1) From the Data Processing/Clusters page (Clusters tab), click on the "Scale Cluster" button of the row that contains the cluster that you want to scale 2) You can adjust the numbers of instances for existing Node Group Templates 3) You can also add a new Node Group Template and choose a number of instances to launch - This can be done by selecting your desired Node Group Template from the dropdown and clicking the "+" button - Your new Node Group will appear below and you can adjust the number of instances via the text box or the "+" and "-" buttons 4) To confirm the scaling settings and trigger the spawning/deletion of instances, click on "Scale" Elastic Data Processing (EDP) ----------------------------- Data Sources ------------ Data Sources are where the input and output from your jobs are housed. 1) From the Data Processing/Jobs page (Data Sources tab), click on the "Create Data Source" button at the top right 2) Give your Data Source a name 3) Enter the URL of the Data Source - For a swift object, enter / (ie: *mycontainer/inputfile*). sahara will prepend *swift://* for you - For an HDFS object, enter an absolute path, a relative path or a full URL: + */my/absolute/path* indicates an absolute path in the cluster HDFS + *my/path* indicates the path */user/hadoop/my/path* in the cluster HDFS assuming the defined HDFS user is *hadoop* + *hdfs://host:port/path* can be used to indicate any HDFS location 4) Enter the username and password for the Data Source (also see `Additional Notes`_) 5) Enter an optional description 6) Click on "Create" 7) Repeat for additional Data Sources Job Binaries ------------ Job Binaries are where you define/upload the source code (mains and libraries) for your job. 
1) From the Data Processing/Jobs (Job Binaries tab), click on the "Create Job Binary" button at the top right 2) Give your Job Binary a name (this can be different than the actual filename) 3) Choose the type of storage for your Job Binary - For "swift", enter the URL of your binary (in the form *container/object*) as well as the username and password (also see `Additional Notes`_) - For "manila", choose the share and enter the path for the binary in this share. This assumes that you have already stored that file in the appropriate path on the share. The share will be automatically mounted to any cluster nodes which require access to the file, if it is not mounted already. - For "Internal database", you can choose from "Create a script" or "Upload a new file" (**only API v1.1**) 4) Enter an optional description 5) Click on "Create" 6) Repeat for additional Job Binaries Job Templates (Known as "Jobs" in the API) ------------------------------------------ Job templates are where you define the type of job you'd like to run as well as which "Job Binaries" are required. 1) From the Data Processing/Jobs page (Job Templates tab), click on the "Create Job Template" button at the top right 2) Give your Job Template a name 3) Choose the type of job you'd like to run 4) Choose the main binary from the dropdown - This is required for Hive, Pig, and Spark jobs - Other job types do not use a main binary 5) Enter an optional description for your Job Template 6) Click on the "Libs" tab and choose any libraries needed by your job template - MapReduce and Java jobs require at least one library - Other job types may optionally use libraries 7) Click on "Create" Jobs (Known as "Job Executions" in the API) ------------------------------------------- Jobs are what you get by "Launching" a job template. You can monitor the status of your job to see when it has completed its run. 1) From the Data Processing/Jobs page (Job Templates tab), find the row that contains the job template you want to launch and click either "Launch on New Cluster" or "Launch on Existing Cluster" on the right side of that row 2) Choose the cluster (already running--see `Launching a Cluster`_ above) on which you would like the job to run 3) Choose the Input and Output Data Sources (Data Sources defined above) 4) If additional configuration is required, click on the "Configure" tab - Additional configuration properties can be defined by clicking on the "Add" button - An example configuration entry might be mapred.mapper.class for the Name and org.apache.oozie.example.SampleMapper for the Value 5) Click on "Launch". To monitor the status of your job, you can navigate to the Data Processing/Jobs panel and click on the Jobs tab. 6) You can relaunch a Job from the Jobs page by using the "Relaunch on New Cluster" or "Relaunch on Existing Cluster" links - Relaunch on New Cluster will take you through the forms to start a new cluster before letting you specify input/output Data Sources and job configuration - Relaunch on Existing Cluster will prompt you for input/output Data Sources as well as allow you to change job configuration before launching the job Example Jobs ------------ There are sample jobs located in the sahara repository. In this section, we will give a walkthrough on how to run those jobs via the Horizon UI. These steps assume that you already have a cluster up and running (in the "Active" state). You may want to clone into https://opendev.org/openstack/sahara-tests/ so that you will have all of the source code and inputs stored locally.
1) Sample Pig job - https://opendev.org/openstack/sahara-tests/src/branch/master/sahara_tests/scenario/defaults/edp-examples/edp-pig/cleanup-string/example.pig - Load the input data file from https://opendev.org/openstack/sahara-tests/src/branch/master/sahara_tests/scenario/defaults/edp-examples/edp-pig/cleanup-string/data/input into swift - Click on Project/Object Store/Containers and create a container with any name ("samplecontainer" for our purposes here) - Click on Upload Object and give the object a name ("piginput" in this case) - Navigate to Data Processing/Jobs/Data Sources, Click on Create Data Source - Name your Data Source ("pig-input-ds" in this sample) - Type = Swift, URL samplecontainer/piginput, fill-in the Source username/password fields with your username/password and click "Create" - Create another Data Source to use as output for the job - Name = pig-output-ds, Type = Swift, URL = samplecontainer/pigoutput, Source username/password, "Create" - Store your Job Binaries in Swift (you can choose another type of storage if you want) - Navigate to Project/Object Store/Containers, choose "samplecontainer" - Click on Upload Object and find example.pig at /sahara-tests/scenario/defaults/edp-examples/ edp-pig/cleanup-string/, name it "example.pig" (or other name). The Swift path will be swift://samplecontainer/example.pig - Click on Upload Object and find edp-pig-udf-stringcleaner.jar at /sahara-tests/scenario/defaults/edp-examples/ edp-pig/cleanup-string/, name it "edp-pig-udf-stringcleaner.jar" (or other name). The Swift path will be swift://samplecontainer/edp-pig-udf-stringcleaner.jar - Navigate to Data Processing/Jobs/Job Binaries, Click on Create Job Binary - Name = example.pig, Storage type = Swift, URL = samplecontainer/example.pig, Username = , Password = - Create another Job Binary: Name = edp-pig-udf-stringcleaner.jar, Storage type = Swift, URL = samplecontainer/edp-pig-udf-stringcleaner.jar, Username = , Password = - Create a Job Template - Navigate to Data Processing/Jobs/Job Templates, Click on Create Job Template - Name = pigsample, Job Type = Pig, Choose "example.pig" as the main binary - Click on the "Libs" tab and choose "edp-pig-udf-stringcleaner.jar", then hit the "Choose" button beneath the dropdown, then click on "Create" - Launch your job - To launch your job from the Job Templates page, click on the down arrow at the far right of the screen and choose "Launch on Existing Cluster" - For the input, choose "pig-input-ds", for output choose "pig-output-ds". Also choose whichever cluster you'd like to run the job on - For this job, no additional configuration is necessary, so you can just click on "Launch" - You will be taken to the "Jobs" page where you can see your job progress through "PENDING, RUNNING, SUCCEEDED" phases - When your job finishes with "SUCCEEDED", you can navigate back to Object Store/Containers and browse to the samplecontainer to see your output. It should be in the "pigoutput" folder 2) Sample Spark job - https://opendev.org/openstack/sahara-tests/src/branch/master/sahara_tests/scenario/defaults/edp-examples/edp-spark You can clone into https://opendev.org/openstack/sahara-tests/ for quicker access to the files for this sample job. 
- Store the Job Binary in Swift (you can choose another type of storage if you want) - Click on Project/Object Store/Containers and create a container with any name ("samplecontainer" for our purposes here) - Click on Upload Object and find spark-wordcount.jar at /sahara-tests/scenario/defaults/edp-examples/ edp-spark/, name it "spark-wordcount.jar" (or other name). The Swift path will be swift://samplecontainer/spark-wordcount.jar - Navigate to Data Processing/Jobs/Job Binaries, Click on Create Job Binary - Name = sparkexample.jar, Storage type = Swift, URL = samplecontainer/spark-wordcount.jar, Username = , Password = - Create a Job Template - Name = sparkexamplejob, Job Type = Spark, Main binary = Choose sparkexample.jar, Click "Create" - Launch your job - To launch your job from the Job Templates page, click on the down arrow at the far right of the screen and choose "Launch on Existing Cluster" - Choose whichever cluster you'd like to run the job on - Click on the "Configure" tab - Set the main class to be: sahara.edp.spark.SparkWordCount - Under Arguments, click Add and fill url for the input file, once more click Add and fill url for the output file. - Click on Launch - You will be taken to the "Jobs" page where you can see your job progress through "PENDING, RUNNING, SUCCEEDED" phases - When your job finishes with "SUCCEEDED", you can see your results in your output file. - The stdout and stderr files of the command used for executing your job are located at /tmp/spark-edp// on Spark master node in case of Spark clusters, or on Spark JobHistory node in other cases like Vanilla, CDH and so on. Additional Notes ---------------- 1) Throughout the sahara UI, you will find that if you try to delete an object that you will not be able to delete it if another object depends on it. An example of this would be trying to delete a Job Template that has an existing Job. In order to be able to delete that job, you would first need to delete any Job Templates that relate to that job. 2) In the examples above, we mention adding your username/password for the swift Data Sources. It should be noted that it is possible to configure sahara such that the username/password credentials are *not* required. For more information on that, please refer to: :doc:`Sahara Advanced Configuration Guide <../admin/advanced-configuration-guide>` Launching a cluster via the Cluster Creation Guide -------------------------------------------------- 1) Under the Data Processing group, choose "Clusters" and then click on the "Clusters" tab. The "Cluster Creation Guide" button is above that table. Click on it. 2) Click on the "Choose Plugin" button then select the cluster type from the Plugin Name dropdown and choose your target version. When done, click on "Select" to proceed. 3) Click on "Create a Master Node Group Template". Give your template a name, choose a flavor and choose which processes should run on nodes launched for this node group. The processes chosen here should be things that are more server-like in nature (namenode, oozieserver, spark master, etc). Optionally, you can set other options here such as availability zone, storage, security and process specific parameters. Click on "Create" to proceed. 4) Click on "Create a Worker Node Group Template". Give your template a name, choose a flavor and choose which processes should run on nodes launched for this node group. Processes chosen here should be more worker-like in nature (datanode, spark slave, task tracker, etc). 
Optionally, you can set other options here such as availability zone, storage, security and process specific parameters. Click on "Create" to proceed. 5) Click on "Create a Cluster Template". Give your template a name. Next, click on the "Node Groups" tab and enter the count for each of the node groups (these are pre-populated from steps 3 and 4). It would be common to have 1 for the "master" node group type and some larger number of "worker" instances depending on your desired cluster size. Optionally, you can also set additional parameters for cluster-wide settings via the other tabs on this page. Click on "Create" to proceed. 6) Click on "Launch a Cluster". Give your cluster a name and choose the image that you want to use for all instances in your cluster. The cluster template that you created in step 5 is already pre-populated. If you want ssh access to the instances of your cluster, select a keypair from the dropdown. Click on "Launch" to proceed. You will be taken to the Clusters panel where you can see your cluster progress toward the Active state. Running a job via the Job Execution Guide ----------------------------------------- 1) Under the Data Processing group, choose "Jobs" and then click on the "Jobs" tab. The "Job Execution Guide" button is above that table. Click on it. 2) Click on "Select type" and choose the type of job that you want to run. 3) If your job requires input/output data sources, you will have the option to create them via the "Create a Data Source" button (Note: This button will not be shown for job types that do not require data sources). Give your data source a name and choose the type. If you have chosen swift, you may also enter the username and password. Enter the URL for your data source. For more details on what the URL should look like, see `Data Sources`_. 4) Click on "Create a job template". Give your job template a name. Depending on the type of job that you've chosen, you may need to select your main binary and/or additional libraries (available from the "Libs" tab). If you have not yet uploaded the files to run your program, you can add them via the "+" icon next to the "Choose a main binary" select box. 5) Click on "Launch job". Choose the active cluster where you want to run your job. Optionally, you can click on the "Configure" tab and provide any required configuration, arguments or parameters for your job. Click on "Launch" to execute your job. You will be taken to the Jobs tab where you can monitor the state of your job as it progresses. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/user/edp-s3.rst0000664000175000017500000000772000000000000017736 0ustar00zuulzuul00000000000000============================== EDP with S3-like Object Stores ============================== Overview and rationale of S3 integration ======================================== Since the Rocky release, Sahara clusters have full support for interaction with S3-like object stores, for example Ceph Rados Gateway. Through the abstractions offered by EDP, a Sahara job execution may consume input data and job binaries stored in S3, as well as write back its output data to S3. The copying of job binaries from S3 to a cluster is performed by the botocore library. A job's input and output to and from S3 is handled by the Hadoop-S3A driver.
It's also worth noting that the Hadoop-S3A driver may be more mature and performant than the Hadoop-SwiftFS driver (either as hosted by Apache or in the sahara-extra repository). Sahara clusters are also provisioned such that data in S3-like storage can also be accessed when manually interacting with the cluster; in other words: the needed libraries are properly situated. Considerations for deployers ============================ The S3 integration features can function without any specific deployment requirement. This is because the EDP S3 abstractions can point to an arbitrary S3 endpoint. Deployers may want to consider using Sahara's optional integration with secret storage to protect the S3 access and secret keys that users will provide. Also, if using Rados Gateway for S3, deployers may want to use Keystone for RGW auth so that users can simply request Keystone EC2 credentials to access RGW's S3. S3 user experience ================== Below, details about how to use the S3 integration features are discussed. EDP job binaries in S3 ---------------------- The ``url`` must be in the format ``s3://bucket/path/to/object``, similar to the format used for binaries in Swift. The ``extra`` structure must contain ``accesskey``, ``secretkey``, and ``endpoint``, which is the URL of the S3 service, including the protocol ``http`` or ``https``. As mentioned above, the binary will be copied to the cluster before execution, by use of the botocore library. This also means that the set of credentials used to access this binary may be entirely different than those for accessing a data source. EDP data sources in S3 ---------------------- The ``url`` should be in the format ``s3://bucket/path/to/object``, although upon execution the protocol will be automatically changed to ``s3a``. The ``credentials`` structure does not have any required values, although the following may be set: * ``accesskey`` and ``secretkey`` * ``endpoint``, which is the URL of the S3 service, without the protocol * ``ssl``, which must be a boolean * ``bucket_in_path``, to indicate whether the S3 service uses virtual-hosted-style or path-style URLs, and must be a boolean The values above are optional, as they may be set in the cluster's ``core-site.xml`` or as configuration values of the job execution, as follows, as dictated by the options understood by the Hadoop-S3A driver: * ``fs.s3a.access.key``, corresponding to ``accesskey`` * ``fs.s3a.secret.key``, corresponding to ``secretkey`` * ``fs.s3a.endpoint``, corresponding to ``endpoint`` * ``fs.s3a.connection.ssl.enabled``, corresponding to ``ssl`` * ``fs.s3a.path.style.access``, corresponding to ``bucket_in_path`` In the case of ``fs.s3a.path.style.access``, a default value is determined by the Hadoop-S3A driver if none is set: virtual-hosted-style URLs are assumed unless told otherwise, or if the endpoint is a raw IP address. Additional configuration values are supported by the Hadoop-S3A driver, and are discussed in its official documentation. It is recommended that the EDP data source abstraction is used, rather than handling bare arguments and configuration values. If any S3 configuration values are to be set at execution time, including such situations in which those values are contained by the EDP data source abstraction, then ``edp.spark.adapt_for_swift`` or ``edp.java.adapt_for_oozie`` must be set to ``true`` as appropriate.
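As an illustration of the options above, the following is a minimal sketch of a data source creation request body referencing S3. The bucket, path, endpoint, and key values are placeholders, and the exact set of top-level fields may vary slightly between API versions; the ``credentials`` keys shown are the ones documented above.

.. code-block:: json

    {
        "name": "s3-input-example",
        "type": "s3",
        "url": "s3://mybucket/input/data.csv",
        "credentials": {
            "accesskey": "MY_ACCESS_KEY",
            "secretkey": "MY_SECRET_KEY",
            "endpoint": "rgw.example.com:8080",
            "ssl": false,
            "bucket_in_path": true
        }
    }

Note that, following the rule above, the ``endpoint`` value here does not include a protocol; whether SSL is used is controlled by the separate ``ssl`` flag.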
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/user/edp.rst0000664000175000017500000007610400000000000017415 0ustar00zuulzuul00000000000000Elastic Data Processing (EDP) ============================= Overview -------- Sahara's Elastic Data Processing facility or :dfn:`EDP` allows the execution of jobs on clusters created from sahara. EDP supports: * Hive, Pig, MapReduce, MapReduce.Streaming, Java, and Shell job types on Hadoop clusters * Spark jobs on Spark standalone clusters, MapR (v5.0.0 - v5.2.0) clusters, Vanilla clusters (v2.7.1) and CDH clusters (v5.3.0 or higher). * storage of job binaries in the OpenStack Object Storage service (swift), the OpenStack Shared file systems service (manila), sahara's own database, or any S3-like object store * access to input and output data sources in + HDFS for all job types + swift for all types excluding Hive + manila (NFS shares only) for all types excluding Pig + Any S3-like object store * configuration of jobs at submission time * execution of jobs on existing clusters or transient clusters Interfaces ---------- The EDP features can be used from the sahara web UI which is described in the :doc:`dashboard-user-guide`. The EDP features also can be used directly by a client through the `REST api `_ EDP Concepts ------------ Sahara EDP uses a collection of simple objects to define and execute jobs. These objects are stored in the sahara database when they are created, allowing them to be reused. This modular approach with database persistence allows code and data to be reused across multiple jobs. The essential components of a job are: * executable code to run * input and output data paths, as needed for the job * any additional configuration values needed for the job run These components are supplied through the objects described below. Job Binaries ++++++++++++ A :dfn:`Job Binary` object stores a URL to a single script or Jar file and any credentials needed to retrieve the file. The file itself may be stored in the sahara internal database (**only API v1.1**), in swift, or in manila. Files in the sahara database are stored as raw bytes in a :dfn:`Job Binary Internal` object. This object's sole purpose is to store a file for later retrieval. No extra credentials need to be supplied for files stored internally. Sahara requires credentials (username and password) to access files stored in swift unless swift proxy users are configured as described in :doc:`../admin/advanced-configuration-guide`. The swift service must be running in the same OpenStack installation referenced by sahara. Sahara requires the following credentials/configs to access files stored in an S3-like object store: ``accesskey``, ``secretkey``, ``endpoint``. These credentials are specified through the `extra` in the body of the request when creating a job binary referencing S3. The value of ``endpoint`` should include a protocol: *http* or *https*. To reference a binary file stored in manila, create the job binary with the URL ``manila://{share_id}/{path}``. This assumes that you have already stored that file in the appropriate path on the share. The share will be automatically mounted to any cluster nodes which require access to the file, if it is not mounted already. There is a configurable limit on the size of a single job binary that may be retrieved by sahara. This limit is 5MB and may be set with the *job_binary_max_KB* setting in the :file:`sahara.conf` configuration file. 
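For reference, a request body for creating a job binary stored in an S3-like object store might look roughly like the following sketch. The bucket, object path, endpoint, and keys are placeholders; the ``extra`` keys are the ones listed above, and in this case the ``endpoint`` includes the protocol as required.

.. code-block:: json

    {
        "name": "my-job-binary",
        "url": "s3://mybucket/jars/wordcount.jar",
        "description": "Example job binary stored in S3",
        "extra": {
            "accesskey": "MY_ACCESS_KEY",
            "secretkey": "MY_SECRET_KEY",
            "endpoint": "https://rgw.example.com:8080"
        }
    }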
Jobs ++++ A :dfn:`Job` object specifies the type of the job and lists all of the individual Job Binary objects that are required for execution. An individual Job Binary may be referenced by multiple Jobs. A Job object specifies a main binary and/or supporting libraries depending on its type: +-------------------------+-------------+-----------+ | Job type | Main binary | Libraries | +=========================+=============+===========+ | ``Hive`` | required | optional | +-------------------------+-------------+-----------+ | ``Pig`` | required | optional | +-------------------------+-------------+-----------+ | ``MapReduce`` | not used | required | +-------------------------+-------------+-----------+ | ``MapReduce.Streaming`` | not used | optional | +-------------------------+-------------+-----------+ | ``Java`` | not used | required | +-------------------------+-------------+-----------+ | ``Shell`` | required | optional | +-------------------------+-------------+-----------+ | ``Spark`` | required | optional | +-------------------------+-------------+-----------+ | ``Storm`` | required | not used | +-------------------------+-------------+-----------+ | ``Storm Pyelus`` | required | not used | +-------------------------+-------------+-----------+ Data Sources ++++++++++++ A :dfn:`Data Source` object stores a URL which designates the location of input or output data and any credentials needed to access the location. Sahara supports data sources in swift. The swift service must be running in the same OpenStack installation referenced by sahara. Sahara also supports data sources in HDFS. Any HDFS instance running on a sahara cluster in the same OpenStack installation is accessible without manual configuration. Other instances of HDFS may be used as well provided that the URL is resolvable from the node executing the job. Sahara supports data sources in manila as well. To reference a path on an NFS share as a data source, create the data source with the URL ``manila://{share_id}/{path}``. As in the case of job binaries, the specified share will be automatically mounted to your cluster's nodes as needed to access the data source. Finally, Sahara supports data sources referring to S3-like object stores. The URL should be of the form ``s3://{bucket}/{path}``. Also, the following credentials/configs are understood: ``accesskey``, ``secretkey``, ``endpoint``, ``bucket_in_path``, and ``ssl``. These credentials are specified through the ``credentials`` attribute of the body of the request when creating a data source referencing S3. The value of ``endpoint`` should **NOT** include a protocol (*http* or *https*), unlike when referencing an S3 job binary. It can also be noted that Sahara clusters can interact with S3-like stores even when not using EDP, i.e. when manually operating the cluster instead. Consult the `hadoop-aws documentation `_ for more information. Also, be advised that hadoop-aws will only write a job's output into a bucket which already exists: it does not create new buckets. Some job types require the use of data source objects to specify input and output when a job is launched. For example, when running a Pig job the UI will prompt the user for input and output data source objects. Other job types like Java or Spark do not require the user to specify data sources. For these job types, data paths are passed as arguments. For convenience, sahara allows data source objects to be referenced by name or id. The section `Using Data Source References as Arguments`_ gives further details. 
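As a further illustration, a data source referencing a file on a manila NFS share could be created with a request body along the lines of the following sketch; the share id and path are placeholders, and the ``manila`` type string is assumed to match the URL scheme described above.

.. code-block:: json

    {
        "name": "nfs-input-example",
        "type": "manila",
        "url": "manila://a1b2c3d4-e5f6-7890-abcd-ef1234567890/input/data.txt",
        "description": "Input data stored on an NFS share"
    }

No credentials are included in this sketch; as described above, the share is simply mounted to the cluster nodes that need to access it.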
Job Execution +++++++++++++ Job objects must be *launched* or *executed* in order for them to run on the cluster. During job launch, a user specifies execution details including data sources, configuration values, and program arguments. The relevant details will vary by job type. The launch will create a :dfn:`Job Execution` object in sahara which is used to monitor and manage the job. To execute Hadoop jobs, sahara generates an Oozie workflow and submits it to the Oozie server running on the cluster. Familiarity with Oozie is not necessary for using sahara but it may be beneficial to the user. A link to the Oozie web console can be found in the sahara web UI in the cluster details. For Spark jobs, sahara uses the *spark-submit* shell script and executes the Spark job from the master node in case of Spark cluster and from the Spark Job History server in other cases. Logs of spark jobs run by sahara can be found on this node under the */tmp/spark-edp* directory. .. _edp_workflow: General Workflow ---------------- The general workflow for defining and executing a job in sahara is essentially the same whether using the web UI or the REST API. 1. Launch a cluster from sahara if there is not one already available 2. Create all of the Job Binaries needed to run the job, stored in the sahara database, in swift, or in manila + When using the REST API and internal storage of job binaries, the Job Binary Internal objects must be created first + Once the Job Binary Internal objects are created, Job Binary objects may be created which refer to them by URL 3. Create a Job object which references the Job Binaries created in step 2 4. Create an input Data Source which points to the data you wish to process 5. Create an output Data Source which points to the location for output data 6. Create a Job Execution object specifying the cluster and Job object plus relevant data sources, configuration values, and program arguments + When using the web UI this is done with the :guilabel:`Launch On Existing Cluster` or :guilabel:`Launch on New Cluster` buttons on the Jobs tab + When using the REST API this is done via the */jobs//execute* method The workflow is simpler when using existing objects. For example, to construct a new job which uses existing binaries and input data a user may only need to perform steps 3, 5, and 6 above. Of course, to repeat the same job multiple times a user would need only step 6. Specifying Configuration Values, Parameters, and Arguments ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Jobs can be configured at launch. 
The job type determines the kinds of values that may be set: +--------------------------+---------------+------------+-----------+ | Job type | Configuration | Parameters | Arguments | | | Values | | | +==========================+===============+============+===========+ | ``Hive`` | Yes | Yes | No | +--------------------------+---------------+------------+-----------+ | ``Pig`` | Yes | Yes | Yes | +--------------------------+---------------+------------+-----------+ | ``MapReduce`` | Yes | No | No | +--------------------------+---------------+------------+-----------+ | ``MapReduce.Streaming`` | Yes | No | No | +--------------------------+---------------+------------+-----------+ | ``Java`` | Yes | No | Yes | +--------------------------+---------------+------------+-----------+ | ``Shell`` | Yes | Yes | Yes | +--------------------------+---------------+------------+-----------+ | ``Spark`` | Yes | No | Yes | +--------------------------+---------------+------------+-----------+ | ``Storm`` | Yes | No | Yes | +--------------------------+---------------+------------+-----------+ | ``Storm Pyelus`` | Yes | No | Yes | +--------------------------+---------------+------------+-----------+ * :dfn:`Configuration values` are key/value pairs. + The EDP configuration values have names beginning with *edp.* and are consumed by sahara + Other configuration values may be read at runtime by Hadoop jobs + Currently additional configuration values are not available to Spark jobs at runtime * :dfn:`Parameters` are key/value pairs. They supply values for the Hive and Pig parameter substitution mechanisms. In Shell jobs, they are passed as environment variables. * :dfn:`Arguments` are strings passed as command line arguments to a shell or main program These values can be set on the :guilabel:`Configure` tab during job launch through the web UI or through the *job_configs* parameter when using the */jobs//execute* REST method. In some cases sahara generates configuration values or parameters automatically. Values set explicitly by the user during launch will override those generated by sahara. Using Data Source References as Arguments +++++++++++++++++++++++++++++++++++++++++ Sometimes it's necessary or desirable to pass a data path as an argument to a job. In these cases, a user may simply type out the path as an argument when launching a job. If the path requires credentials, the user can manually add the credentials as configuration values. However, if a data source object has been created that contains the desired path and credentials there is no need to specify this information manually. As a convenience, sahara allows data source objects to be referenced by name or id in arguments, configuration values, or parameters. When the job is executed, sahara will replace the reference with the path stored in the data source object and will add any necessary credentials to the job configuration. Referencing an existing data source object is much faster than adding this information by hand. This is particularly useful for job types like Java or Spark that do not use data source objects directly. There are two job configuration parameters that enable data source references. They may be used with any job type and are set on the ``Configuration`` tab when the job is launched: * ``edp.substitute_data_source_for_name`` (default **False**) If set to **True**, causes sahara to look for data source object name references in configuration values, arguments, and parameters when a job is launched. 
Name references have the form **datasource://name_of_the_object**. For example, assume a user has a WordCount application that takes an input path as an argument. If there is a data source object named **my_input**, a user may simply set the **edp.substitute_data_source_for_name** configuration parameter to **True** and add **datasource://my_input** as an argument when launching the job. * ``edp.substitute_data_source_for_uuid`` (default **False**) If set to **True**, causes sahara to look for data source object ids in configuration values, arguments, and parameters when a job is launched. A data source object id is a uuid, so they are unique. The id of a data source object is available through the UI or the sahara command line client. A user may simply use the id as a value. Creating an Interface for Your Job ++++++++++++++++++++++++++++++++++ In order to better document your job for cluster operators (or for yourself in the future), sahara allows the addition of an interface (or method signature) to your job template. A sample interface for the Teragen Hadoop example might be: +---------+---------+-----------+-------------+----------+--------------------+ | Name | Mapping | Location | Value | Required | Default | | | Type | | Type | | | +=========+=========+===========+=============+==========+====================+ | Example | args | 0 | string | false | teragen | | Class | | | | | | +---------+---------+-----------+-------------+----------+--------------------+ | Rows | args | 1 | number | true | unset | +---------+---------+-----------+-------------+----------+--------------------+ | Output | args | 2 | data_source | false | hdfs://ip:port/path| | Path | | | | | | +---------+---------+-----------+-------------+----------+--------------------+ | Mapper | configs | mapred. | number | false | unset | | Count | | map.tasks | | | | +---------+---------+-----------+-------------+----------+--------------------+ A "Description" field may also be added to each interface argument. To create such an interface via the REST API, provide an "interface" argument, the value of which consists of a list of JSON objects, as below: .. code-block:: [ { "name": "Example Class", "description": "Indicates which example job class should be used.", "mapping_type": "args", "location": "0", "value_type": "string", "required": false, "default": "teragen" }, ] Creating this interface would allow you to specify a configuration for any execution of the job template by passing an "interface" map similar to: .. code-block:: { "Rows": "1000000", "Mapper Count": "3", "Output Path": "hdfs://mycluster:8020/user/myuser/teragen-output" } The specified arguments would be automatically placed into the args, configs, and params for the job, according to the mapping type and location fields of each interface argument. The final ``job_configs`` map would be: .. code-block:: { "job_configs": { "configs": { "mapred.map.tasks": "3" }, "args": [ "teragen", "1000000", "hdfs://mycluster:8020/user/myuser/teragen-output" ] } } Rules for specifying an interface are as follows: - Mapping Type must be one of ``configs``, ``params``, or ``args``. Only types supported for your job type are allowed (see above.) - Location must be a string for ``configs`` and ``params``, and an integer for ``args``. The set of ``args`` locations must be an unbroken series of integers starting from 0. - Value Type must be one of ``string``, ``number``, or ``data_source``. Data sources may be passed as UUIDs or as valid paths (see above.) 
All values should be sent as JSON strings. (Note that booleans and null values are serialized differently in different languages. Please specify them as a string representation of the appropriate constants for your data processing engine.) - ``args`` that are not required must be given a default value. The additional one-time complexity of specifying an interface on your template allows a simpler repeated execution path, and also allows us to generate a customized form for your job in the Horizon UI. This may be particularly useful in cases in which an operator who is not a data processing job developer will be running and administering the jobs. Generation of Swift Properties for Data Sources +++++++++++++++++++++++++++++++++++++++++++++++ If swift proxy users are not configured (see :doc:`../admin/advanced-configuration-guide`) and a job is run with data source objects containing swift paths, sahara will automatically generate swift username and password configuration values based on the credentials in the data sources. If the input and output data sources are both in swift, it is expected that they specify the same credentials. The swift credentials may be set explicitly with the following configuration values: +------------------------------------+ | Name | +====================================+ | fs.swift.service.sahara.username | +------------------------------------+ | fs.swift.service.sahara.password | +------------------------------------+ Setting the swift credentials explicitly is required when passing literal swift paths as arguments instead of using data source references. When possible, use data source references as described in `Using Data Source References as Arguments`_. Additional Details for Hive jobs ++++++++++++++++++++++++++++++++ Sahara will automatically generate values for the ``INPUT`` and ``OUTPUT`` parameters required by Hive based on the specified data sources. Additional Details for Pig jobs +++++++++++++++++++++++++++++++ Sahara will automatically generate values for the ``INPUT`` and ``OUTPUT`` parameters required by Pig based on the specified data sources. For Pig jobs, ``arguments`` should be thought of as command line arguments separated by spaces and passed to the ``pig`` shell. ``Parameters`` are a shorthand and are actually translated to the arguments ``-param name=value`` Additional Details for MapReduce jobs +++++++++++++++++++++++++++++++++++++ **Important!** If the job type is MapReduce, the mapper and reducer classes *must* be specified as configuration values. Note that the UI will not prompt the user for these required values; they must be added manually with the ``Configure`` tab. 
Make sure to add these values with the correct names: +-----------------------------+----------------------------------------+ | Name | Example Value | +=============================+========================================+ | mapred.mapper.new-api | true | +-----------------------------+----------------------------------------+ | mapred.reducer.new-api | true | +-----------------------------+----------------------------------------+ | mapreduce.job.map.class | org.apache.oozie.example.SampleMapper | +-----------------------------+----------------------------------------+ | mapreduce.job.reduce.class | org.apache.oozie.example.SampleReducer | +-----------------------------+----------------------------------------+ Additional Details for MapReduce.Streaming jobs +++++++++++++++++++++++++++++++++++++++++++++++ **Important!** If the job type is MapReduce.Streaming, the streaming mapper and reducer classes *must* be specified. In this case, the UI *will* prompt the user to enter mapper and reducer values on the form and will take care of adding them to the job configuration with the appropriate names. If using the python client, however, be certain to add these values to the job configuration manually with the correct names: +-------------------------+---------------+ | Name | Example Value | +=========================+===============+ | edp.streaming.mapper | /bin/cat | +-------------------------+---------------+ | edp.streaming.reducer | /usr/bin/wc | +-------------------------+---------------+ Additional Details for Java jobs ++++++++++++++++++++++++++++++++ Data Source objects are not used directly with Java job types. Instead, any input or output paths must be specified as arguments at job launch either explicitly or by reference as described in `Using Data Source References as Arguments`_. Using data source references is the recommended way to pass paths to Java jobs. If configuration values are specified, they must be added to the job's Hadoop configuration at runtime. There are two methods of doing this. The simplest way is to use the **edp.java.adapt_for_oozie** option described below. The other method is to use the code from `this example `_ to explicitly load the values. The following special configuration values are read by sahara and affect how Java jobs are run: * ``edp.java.main_class`` (required) Specifies the full name of the class containing ``main(String[] args)`` A Java job will execute the **main** method of the specified main class. Any arguments set during job launch will be passed to the program through the **args** array. * ``oozie.libpath`` (optional) Specifies configuration values for the Oozie share libs, these libs can be shared by different workflows * ``edp.java.java_opts`` (optional) Specifies configuration values for the JVM * ``edp.java.adapt_for_oozie`` (optional) Specifies that sahara should perform special handling of configuration values and exit conditions. The default is **False**. If this configuration value is set to **True**, sahara will modify the job's Hadoop configuration before invoking the specified **main** method. Any configuration values specified during job launch (excluding those beginning with **edp.**) will be automatically set in the job's Hadoop configuration and will be available through standard methods. Secondly, setting this option to **True** ensures that Oozie will handle program exit conditions correctly. 
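Putting these options together, the ``job_configs`` passed when launching a Java job might look like the following sketch. The class name and data source names are placeholders; ``edp.substitute_data_source_for_name`` is described in `Using Data Source References as Arguments`_ and is used here so that the arguments can refer to existing data source objects.

.. code-block:: json

    {
        "job_configs": {
            "configs": {
                "edp.java.main_class": "org.example.WordCount",
                "edp.java.adapt_for_oozie": "true",
                "edp.substitute_data_source_for_name": "true"
            },
            "args": [
                "datasource://wordcount-input",
                "datasource://wordcount-output"
            ]
        }
    }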
At this time, the following special configuration value only applies when running jobs on a cluster generated by the Cloudera plugin with the **Enable Hbase Common Lib** cluster config set to **True** (the default value): * ``edp.hbase_common_lib`` (optional) Specifies that a common Hbase lib generated by sahara in HDFS be added to the **oozie.libpath**. This is for use when an Hbase application is driven from a Java job. Default is **False**. The **edp-wordcount** example bundled with sahara shows how to use configuration values, arguments, and swift data paths in a Java job type. Note that the example does not use the **edp.java.adapt_for_oozie** option but includes the code to load the configuration values explicitly. Additional Details for Shell jobs +++++++++++++++++++++++++++++++++ A shell job will execute the script specified as ``main``, and will place any files specified as ``libs`` in the same working directory (on both the filesystem and in HDFS). Command line arguments may be passed to the script through the ``args`` array, and any ``params`` values will be passed as environment variables. Data Source objects are not used directly with Shell job types but data source references may be used as described in `Using Data Source References as Arguments`_. The **edp-shell** example bundled with sahara contains a script which will output the executing user to a file specified by the first command line argument. Additional Details for Spark jobs +++++++++++++++++++++++++++++++++ Data Source objects are not used directly with Spark job types. Instead, any input or output paths must be specified as arguments at job launch either explicitly or by reference as described in `Using Data Source References as Arguments`_. Using data source references is the recommended way to pass paths to Spark jobs. Spark jobs use some special configuration values: * ``edp.java.main_class`` (required) Specifies the full name of the class containing the Java or Scala main method: + ``main(String[] args)`` for Java + ``main(args: Array[String])`` for Scala A Spark job will execute the **main** method of the specified main class. Any arguments set during job launch will be passed to the program through the **args** array. * ``edp.spark.adapt_for_swift`` (optional) If set to **True**, instructs sahara to modify the job's Hadoop configuration so that swift paths may be accessed. Without this configuration value, swift paths will not be accessible to Spark jobs. The default is **False**. Despite the name, the same principle applies to jobs which reference paths in S3-like stores. * ``edp.spark.driver.classpath`` (optional) If set to an empty string, sahara will use the default classpath for the cluster during job execution. Otherwise this will override the default value for the cluster for a particular job execution. The **edp-spark** example bundled with sahara contains a Spark program for estimating Pi. Special Sahara URLs ------------------- Sahara uses custom URLs to refer to objects stored in swift, in manila, in the sahara internal database, or in S3-like storage. These URLs are usually not meant to be used outside of sahara. Sahara swift URLs passed to running jobs as input or output sources include a ".sahara" suffix on the container, for example: ``swift://container.sahara/object`` You may notice these swift URLs in job logs, however, you do not need to add the suffix to the containers yourself.
sahara will add the suffix if necessary, so when using the UI or the python client you may write the above URL simply as: ``swift://container/object`` Sahara internal database URLs have the form: ``internal-db://sahara-generated-uuid`` This indicates a file object in the sahara database which has the given uuid as a key. Manila NFS filesystem reference URLS take the form: ``manila://share-uuid/path`` This format should be used when referring to a job binary or a data source stored in a manila NFS share. For both job binaries and data sources, S3 urls take the form: ``s3://bucket/path/to/object`` Despite the above URL format, the current implementation of EDP will still use the Hadoop ``s3a`` driver to access data sources. Botocore is used to access job binaries. EDP Requirements ================ The OpenStack installation and the cluster launched from sahara must meet the following minimum requirements in order for EDP to function: OpenStack Services ------------------ When a Hadoop job is executed, binaries are first uploaded to a cluster node and then moved from the node local filesystem to HDFS. Therefore, there must be an instance of HDFS available to the nodes in the sahara cluster. If the swift service *is not* running in the OpenStack installation: + Job binaries may only be stored in the sahara internal database + Data sources require a long-running HDFS If the swift service *is* running in the OpenStack installation: + Job binaries may be stored in swift or the sahara internal database + Data sources may be in swift or a long-running HDFS Cluster Processes ----------------- Requirements for EDP support depend on the EDP job type and plugin used for the cluster. For example a Vanilla sahara cluster must run at least one instance of these processes to support EDP: * For Hadoop version 1: + jobtracker + namenode + oozie + tasktracker + datanode * For Hadoop version 2: + namenode + datanode + resourcemanager + nodemanager + historyserver + oozie + spark history server EDP Technical Considerations ============================ There are several things in EDP which require attention in order to work properly. They are listed on this page. Transient Clusters ------------------ EDP allows running jobs on transient clusters. In this case the cluster is created specifically for the job and is shut down automatically once the job is finished. Two config parameters control the behaviour of periodic clusters: * periodic_enable - if set to 'false', sahara will do nothing to a transient cluster once the job it was created for is completed. If it is set to 'true', then the behaviour depends on the value of the next parameter. * use_identity_api_v3 - set it to 'false' if your OpenStack installation does not provide keystone API v3. In that case sahara will not terminate unneeded clusters. Instead it will set their state to 'AwaitingTermination' meaning that they could be manually deleted by a user. If the parameter is set to 'true', sahara will itself terminate the cluster. The limitation is caused by lack of 'trusts' feature in Keystone API older than v3. If both parameters are set to 'true', sahara works with transient clusters in the following manner: 1. When a user requests for a job to be executed on a transient cluster, sahara creates such a cluster. 2. Sahara drops the user's credentials once the cluster is created but prior to that it creates a trust allowing it to operate with the cluster instances in the future without user credentials. 3. 
Once a cluster is not needed, sahara terminates its instances using the stored trust. sahara drops the trust after that. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/user/features.rst0000664000175000017500000002772200000000000020465 0ustar00zuulzuul00000000000000Features Overview ================= This page highlights some of the most prominent features available in sahara. The guidance provided here is primarily focused on the runtime aspects of sahara. For discussions about configuring the sahara server processes please see the :doc:`../admin/configuration-guide` and :doc:`../admin/advanced-configuration-guide`. Anti-affinity ------------- One of the problems with running data processing applications on OpenStack is the inability to control where an instance is actually running. It is not always possible to ensure that two new virtual machines are started on different physical machines. As a result, any replication within the cluster is not reliable because all replicas may be co-located on one physical machine. To remedy this, sahara provides the anti-affinity feature to explicitly command all instances of the specified processes to spawn on different Compute nodes. This is especially useful for Hadoop data node processes to increase HDFS replica reliability. Starting with the Juno release, sahara can create server groups with the ``anti-affinity`` policy to enable this feature. Sahara creates one server group per cluster and assigns all instances with affected processes to this server group. Refer to the :nova-doc:`Nova Anti-Affinity documentation ` on how server group affinity filters work. This feature is supported by all plugins out of the box, and can be enabled during the cluster template creation. Block Storage support --------------------- OpenStack Block Storage (cinder) can be used as an alternative for ephemeral drives on instances. Using Block Storage volumes increases the reliability of data which is important for HDFS services. A user can set how many volumes will be attached to each instance in a node group and the size of each volume. All volumes are attached during cluster creation and scaling operations. If volumes are used for the HDFS storage it's important to make sure that the linear read-write operations as well as IOpS level are high enough to handle the workload. Volumes placed on the same compute host provide a higher level of performance. In some cases cinder volumes can be backed by a distributed storage like Ceph. In this type of installation it's important to make sure that the network latency and speed do not become a blocker for HDFS. Distributed storage solutions usually provide their own replication mechanism. HDFS replication should be disabled so that it does not generate redundant traffic across the cloud. Cluster scaling --------------- Cluster scaling allows users to change the number of running instances in a cluster without needing to recreate the cluster. Users may increase or decrease the number of instances in node groups or add new node groups to existing clusters. If a cluster fails to scale properly, all changes will be rolled back. Data locality ------------- For optimal performance, it is best for data processing applications to work on data local to the same rack, OpenStack Compute node, or virtual machine. Hadoop supports a data locality feature and can schedule jobs to task tracker nodes that are local for the input stream. 
In this manner the task tracker nodes can communicate directly with the local data nodes. Sahara supports topology configuration for HDFS and Object Storage data sources. For more information on configuring this option please see the :ref:`data_locality_configuration` documentation. Volume-to-instance locality --------------------------- Having an instance and an attached volume on the same physical host can be very helpful in order to achieve high-performance disk I/O operations. To achieve this, sahara provides access to the Block Storage volume instance locality functionality. For more information on using volume instance locality with sahara, please see the :ref:`volume_instance_locality_configuration` documentation. Distributed Mode ---------------- The :doc:`../install/installation-guide` suggests launching sahara in distributed mode with ``sahara-api`` and ``sahara-engine`` processes potentially running on several machines simultaneously. Running in distributed mode allows sahara to offload intensive tasks to the engine processes while keeping the API process free to handle requests. For an expanded discussion of configuring sahara to run in distributed mode please see the :ref:`distributed-mode-configuration` documentation. Hadoop HDFS and YARN High Availability -------------------------------------- Currently HDFS and YARN HA are supported with the HDP 2.4 and CDH 5.7 plugins. Hadoop HDFS and YARN High Availability provide an architecture to ensure that HDFS or YARN will continue to work in the event of an active namenode or resourcemanager failure. They use 2 namenodes and 2 resourcemanagers in an active/passive state to provide this availability. In the HDP 2.4 plugin, the feature can be enabled through the dashboard in the Cluster Template creation form. High availability is achieved by using a set of journalnodes, Zookeeper servers, and ZooKeeper Failover Controllers (ZKFC), as well as additional configuration changes to HDFS and other services that use HDFS. In the CDH 5.7 plugin, HA for HDFS and YARN is enabled by adding several HDFS_JOURNALNODE roles in the node group templates of the cluster template. HDFS HA is enabled when HDFS_JOURNALNODE roles are added and the role setup meets the following requirements: * The number of HDFS_JOURNALNODE roles is odd, and at least 3. * Zookeeper is enabled. * NameNode and SecondaryNameNode are on different physical hosts by setting anti-affinity. * Cluster has both ResourceManager and StandByResourceManager. In this case, the original SecondaryNameNode node will be used as the Standby NameNode. Networking support ------------------ Sahara supports neutron implementations of OpenStack Networking. Object Storage support ---------------------- Sahara can use OpenStack Object Storage (swift) to store job binaries and data sources utilized by its job executions and clusters. In order to leverage this support within Hadoop, including using Object Storage for data sources for EDP, Hadoop requires the application of a patch. For additional information about enabling this support, including patching Hadoop and configuring sahara, please refer to the :doc:`hadoop-swift` documentation. Shared Filesystem support ------------------------- Sahara can also use NFS shares through the OpenStack Shared Filesystem service (manila) to store job binaries and data sources. See :doc:`edp` for more information on this feature. Orchestration support --------------------- Sahara may use the `OpenStack Orchestration engine `_ (heat) to provision nodes for clusters.
For more information about enabling Orchestration usage in sahara please see :ref:`orchestration-configuration`. DNS support ----------- Sahara can resolve hostnames of cluster instances by using DNS. For this Sahara uses designate. For additional details see :doc:`../admin/advanced-configuration-guide`. Kerberos support ---------------- You can protect your HDP or CDH cluster using MIT Kerberos security. To get more details about this, please, see documentation for the appropriate plugin. Plugin Capabilities ------------------- The following table provides a plugin capability matrix: +--------------------------+---------+----------+----------+-------+ | Feature/Plugin | Vanilla | HDP | Cloudera | Spark | +==========================+=========+==========+==========+=======+ | Neutron network | x | x | x | x | +--------------------------+---------+----------+----------+-------+ | Cluster Scaling | x | x | x | x | +--------------------------+---------+----------+----------+-------+ | Swift Integration | x | x | x | x | +--------------------------+---------+----------+----------+-------+ | Cinder Support | x | x | x | x | +--------------------------+---------+----------+----------+-------+ | Data Locality | x | x | x | x | +--------------------------+---------+----------+----------+-------+ | DNS | x | x | x | x | +--------------------------+---------+----------+----------+-------+ | Kerberos | \- | x | x | \- | +--------------------------+---------+----------+----------+-------+ | HDFS HA | \- | x | x | \- | +--------------------------+---------+----------+----------+-------+ | EDP | x | x | x | x | +--------------------------+---------+----------+----------+-------+ Security group management ------------------------- Security groups are sets of IP filter rules that are applied to an instance's networking. They are project specified, and project members can edit the default rules for their group and add new rules sets. All projects have a "default" security group, which is applied to instances that have no other security group defined. Unless changed, this security group denies all incoming traffic. Sahara allows you to control which security groups will be used for created instances. This can be done by providing the ``security_groups`` parameter for the node group or node group template. The default for this option is an empty list, which will result in the default project security group being used for the instances. Sahara may also create a security group for instances in the node group automatically. This security group will only contain open ports for required instance processes and the sahara engine. This option is useful for development and for when your installation is secured from outside environments. For production environments we recommend controlling the security group policy manually. Shared and protected resources support -------------------------------------- Sahara allows you to create resources that can be shared across projects and protected from modifications. To provide this feature all sahara objects that can be accessed through REST API have ``is_public`` and ``is_protected`` boolean fields. They can be initially created with enabled ``is_public`` and ``is_protected`` parameters or these parameters can be updated after creation. Both fields are set to ``False`` by default. If some object has its ``is_public`` field set to ``True``, it means that it's visible not only from the project in which it was created, but from any other projects too. 
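For example, building on the JSON template files shown in the quickstart guide, a cluster template can be created as a shared and protected resource by adding the two fields to the request body before uploading it with ``openstack dataprocessing cluster template create --json``. This is only a sketch: the plugin version and node group template IDs below are placeholders that must be replaced with your own values.

.. code-block:: json

    {
        "plugin_name": "vanilla",
        "hadoop_version": "<plugin_version>",
        "name": "vanilla-shared-cluster-template",
        "node_groups": [
            {
                "name": "worker",
                "count": 3,
                "node_group_template_id": "<worker_template_id>"
            },
            {
                "name": "master",
                "count": 1,
                "node_group_template_id": "<master_template_id>"
            }
        ],
        "is_public": true,
        "is_protected": true
    }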
If some object has its ``is_protected`` field set to ``True``, it means that it can not be modified (updated, scaled, canceled or deleted) unless this field is set to ``False``. Public objects created in one project can be used from other projects (for example, a cluster can be created from a public cluster template which is created in another project), but modification operations are possible only from the project in which object was created. Data source placeholders support -------------------------------- Sahara supports special strings that can be used in data source URLs. These strings will be replaced with appropriate values during job execution which allows the use of the same data source as an output multiple times. There are 2 types of string currently supported: * ``%JOB_EXEC_ID%`` - this string will be replaced with the job execution ID. * ``%RANDSTR(len)%`` - this string will be replaced with random string of lowercase letters of length ``len``. ``len`` must be less than 1024. After placeholders are replaced, the real URLs are stored in the ``data_source_urls`` field of the job execution object. This is used later to find objects created by a particular job run. Keypair replacement ------------------- A cluster allows users to create a new keypair to access to the running cluster when the cluster's keypair is deleted. But the name of new keypair should be same as the deleted one, and the new keypair will be available for cluster scaling. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/user/hadoop-swift.rst0000664000175000017500000001241100000000000021240 0ustar00zuulzuul00000000000000.. _swift-integration-label: Swift Integration ================= Hadoop and Swift integration are the essential continuation of the Hadoop/OpenStack marriage. The key component to making this marriage work is the Hadoop Swift filesystem implementation. Although this implementation has been merged into the upstream Hadoop project, Sahara maintains a version with the most current features enabled. * The original Hadoop patch can be found at https://issues.apache.org/jira/browse/HADOOP-8545 * The most current Sahara maintained version of this patch can be found in the `Sahara Extra repository `_ * The latest compiled version of the jar for this component can be downloaded from https://tarballs.openstack.org/sahara-extra/dist/hadoop-openstack/master/ Now the latest version of this jar (which uses Keystone API v3) is used in the plugins' images automatically during build of these images. But for Ambari plugin we need to explicitly put this jar into /opt directory of the base image **before** cluster launching. Hadoop patching --------------- You may build the jar file yourself by choosing the latest patch from the Sahara Extra repository and using Maven to build with the pom.xml file provided. Or you may get the latest jar pre-built at https://tarballs.openstack.org/sahara-extra/dist/hadoop-openstack/master/ You will need to put this file into the hadoop libraries (e.g. /usr/lib/share/hadoop/lib, it depends on the plugin which you use) on each ResourceManager and NodeManager node (for Hadoop 2.x) in the cluster. Hadoop configurations --------------------- In general, when Sahara runs a job on a cluster it will handle configuring the Hadoop installation. In cases where a user might require more in-depth configuration all the data is set in the ``core-site.xml`` file on the cluster instances using this template: .. 
code-block:: ${name} + ${config} ${value} ${not mandatory description} There are two types of configs here: 1. General. The ``${name}`` in this case equals to ``fs.swift``. Here is the list of ``${config}``: * ``.impl`` - Swift FileSystem implementation. The ${value} is ``org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem`` * ``.connect.timeout`` - timeout for all connections by default: 15000 * ``.socket.timeout`` - how long the connection waits for responses from servers. by default: 60000 * ``.connect.retry.count`` - connection retry count for all connections. by default: 3 * ``.connect.throttle.delay`` - delay in millis between bulk (delete, rename, copy operations). by default: 0 * ``.blocksize`` - blocksize for filesystem. By default: 32Mb * ``.partsize`` - the partition size for uploads. By default: 4608*1024Kb * ``.requestsize`` - request size for reads in KB. By default: 64Kb 2. Provider-specific. The patch for Hadoop supports different cloud providers. The ``${name}`` in this case equals to ``fs.swift.service.${provider}``. Here is the list of ``${config}``: * ``.auth.url`` - authorization URL * ``.auth.endpoint.prefix`` - prefix for the service url, e.g. ``/AUTH_`` * ``.tenant`` - project name * ``.username`` * ``.password`` * ``.domain.name`` - Domains can be used to specify users who are not in the project specified. * ``.domain.id`` - You can also specify domain using id. * ``.trust.id`` - Trusts are optionally used to scope the authentication tokens of the supplied user. * ``.http.port`` * ``.https.port`` * ``.region`` - Swift region is used when cloud has more than one Swift installation. If region param is not set first region from Keystone endpoint list will be chosen. If region param not found exception will be thrown. * ``.location-aware`` - turn On location awareness. Is false by default * ``.apikey`` * ``.public`` Example ------- For this example it is assumed that you have setup a Hadoop instance with a valid configuration and the Swift filesystem component. Furthermore there is assumed to be a Swift container named ``integration`` holding an object named ``temp``, as well as a Keystone user named ``admin`` with a password of ``swordfish``. The following example illustrates how to copy an object to a new location in the same container. We will use Hadoop's ``distcp`` command (http://hadoop.apache.org/docs/stable/hadoop-distcp/DistCp.html) to accomplish the copy. Note that the service provider for our Swift access is ``sahara``, and that we will not need to specify the project of our Swift container as it will be provided in the Hadoop configuration. Swift paths are expressed in Hadoop according to the following template: ``swift://${container}.${provider}/${object}``. For our example source this will appear as ``swift://integration.sahara/temp``. Let's run the job: .. sourcecode:: console $ hadoop distcp -D fs.swift.service.sahara.username=admin \ -D fs.swift.service.sahara.password=swordfish \ swift://integration.sahara/temp swift://integration.sahara/temp1 After that just confirm that ``temp1`` has been created in our ``integration`` container. Limitations ----------- **Note:** Please note that container names should be a valid URI. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/user/index.rst0000664000175000017500000000075600000000000017754 0ustar00zuulzuul00000000000000========== User Guide ========== General concepts and guides =========================== .. 
toctree:: :maxdepth: 2 overview quickstart dashboard-user-guide features registering-image statuses sahara-on-ironic Plugins ======= .. toctree:: :maxdepth: 2 plugins Elastic Data Processing ======================= .. toctree:: :maxdepth: 2 edp edp-s3 Guest Images ============ .. toctree:: :maxdepth: 2 building-guest-images hadoop-swift ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/user/overview.rst0000664000175000017500000000631000000000000020503 0ustar00zuulzuul00000000000000Getting Started =============== Clusters -------- A cluster deployed by sahara consists of node groups. Node groups vary by their role, parameters and number of machines. The picture below illustrates an example of a Hadoop cluster consisting of 3 node groups each having a different role (set of processes). .. image:: ../images/hadoop-cluster-example.jpg Node group parameters include Hadoop parameters like ``io.sort.mb`` or ``mapred.child.java.opts``, and several infrastructure parameters like the flavor for instances or storage location (ephemeral drive or cinder volume). A cluster is characterized by its node groups and its parameters. Like a node group, a cluster has data processing framework and infrastructure parameters. An example of a cluster-wide Hadoop parameter is ``dfs.replication``. For infrastructure, an example could be image which will be used to launch cluster instances. Templates --------- In order to simplify cluster provisioning sahara employs the concept of templates. There are two kinds of templates: node group templates and cluster templates. The former is used to create node groups, the latter - clusters. Essentially templates have the very same parameters as corresponding entities. Their aim is to remove the burden of specifying all of the required parameters each time a user wants to launch a cluster. In the REST interface, templates have extended functionality. First you can specify node-scoped parameters, they will work as defaults for node groups. Also with the REST interface, during cluster creation a user can override template parameters for both cluster and node groups. Templates are portable - they can be exported to JSON files and imported either on the same deployment or on another one. To import an exported template, replace the placeholder values with appropriate ones. This can be accomplished easily through the CLI or UI, or manually editing the template file. Provisioning Plugins -------------------- A provisioning plugin is a component responsible for provisioning a data processing cluster. Generally each plugin is capable of provisioning a specific data processing framework or Hadoop distribution. Also the plugin can install management and/or monitoring tools for a cluster. Since framework configuration parameters vary depending on the distribution and the version, templates are always plugin and version specific. A template cannot be used if the plugin, or framework, versions are different than the ones they were created for. You may find the list of available plugins on that page: :doc:`plugins` Image Registry -------------- OpenStack starts VMs based on a pre-built image with an installed OS. The image requirements for sahara depend on the plugin and data processing framework version. Some plugins require just a basic cloud image and will install the framework on the instance from scratch. Some plugins might require images with pre-installed frameworks or Hadoop distributions. 
The Sahara Image Registry is a feature which helps filter out images during cluster creation. See :doc:`registering-image` for details on how to work with Image Registry. Features -------- Sahara has several interesting features. The full list could be found here: :doc:`features` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/user/plugins.rst0000664000175000017500000000363100000000000020321 0ustar00zuulzuul00000000000000Provisioning Plugins ==================== This page lists all available provisioning plugins. In general a plugin enables sahara to deploy a specific data processing framework (for example, Hadoop) or distribution, and allows configuration of topology and management/monitoring tools. The plugins currently developed as part of the official Sahara project are: * :sahara-plugin-ambari-doc:`Ambari Plugin <>` - deploys Hortonworks Data Platform * :sahara-plugin-cdh-doc:`CDH Plugin <>` - deploys Cloudera Hadoop * :sahara-plugin-mapr-doc:`MapR Plugin <>` - deploys MapR plugin with MapR File System * :sahara-plugin-spark-doc:`Spark Plugin <>` - deploys Apache Spark with Cloudera HDFS * :sahara-plugin-storm-doc:`Storm Plugin <>` - deploys Apache Storm * :sahara-plugin-vanilla-doc:`Vanilla Plugin <>` - deploys Vanilla Apache Hadoop Managing plugins ---------------- Since the Newton release a project admin can configure plugins by specifying additional values for plugin's labels. To disable a plugin (Vanilla Apache Hadoop, for example), the admin can run the following command: .. code-block:: cat update_configs.json { "plugin_labels": { "enabled": { "status": true } } } openstack dataprocessing plugin update vanilla update_configs.json Additionally, specific versions can be disabled by the following command: .. code-block:: cat update_configs.json { "version_labels": { "2.7.1": { "enabled": { "status": true } } } } openstack dataprocessing plugin update vanilla update_configs.json Finally, to see all labels of a specific plugin and to see the current status of the plugin (is it stable or not, deprecation status) the following command can be executed from the CLI: .. code-block:: openstack dataprocessing plugin show vanilla The same actions are available from UI respectively. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/user/quickstart.rst0000664000175000017500000006672500000000000021047 0ustar00zuulzuul00000000000000================ Quickstart guide ================ Launching a cluster via Sahara CLI commands =========================================== This guide will help you setup a vanilla Hadoop cluster using a combination of OpenStack command line tools and the sahara :doc:`REST API <../reference/restapi>`. 1. Install sahara ----------------- * If you want to hack the code follow :doc:`../contributor/development-environment`. OR * If you just want to install and use sahara follow :doc:`../install/installation-guide`. 2. Identity service configuration --------------------------------- To use the OpenStack command line tools you should specify environment variables with the configuration details for your OpenStack installation. The following example assumes that the Identity service is at ``127.0.0.1:5000``, with a user ``admin`` in the ``admin`` project whose password is ``nova``: .. 
sourcecode:: console $ export OS_AUTH_URL=http://127.0.0.1:5000/v2.0/ $ export OS_PROJECT_NAME=admin $ export OS_USERNAME=admin $ export OS_PASSWORD=nova 3. Upload an image to the Image service --------------------------------------- You will need to upload a virtual machine image to the OpenStack Image service. You can build the images yourself. This guide uses the latest generated Ubuntu vanilla image, referred to as ``sahara-vanilla-latest-ubuntu.qcow2``, and the latest version of vanilla plugin as an example. Build an image which works for the specific plugin. Please refer to :ref:`building-guest-images-label` and to the plugin-specific documentation. Upload the generated image into the OpenStack Image service: .. code-block:: $ openstack image create sahara-vanilla-latest-ubuntu --disk-format qcow2 \ --container-format bare --file sahara-vanilla-latest-ubuntu.qcow2 +------------------+--------------------------------------+ | Field | Value | +------------------+--------------------------------------+ | checksum | 3da49911332fc46db0c5fb7c197e3a77 | | container_format | bare | | created_at | 2016-02-29T10:15:04.000000 | | deleted | False | | deleted_at | None | | disk_format | qcow2 | | id | 71b9eeac-c904-4170-866a-1f833ea614f3 | | is_public | False | | min_disk | 0 | | min_ram | 0 | | name | sahara-vanilla-latest-ubuntu | | owner | 057d23cddb864759bfa61d730d444b1f | | properties | | | protected | False | | size | 1181876224 | | status | active | | updated_at | 2016-02-29T10:15:41.000000 | | virtual_size | None | +------------------+--------------------------------------+ Remember the image name or save the image ID. This will be used during the image registration with sahara. You can get the image ID using the ``openstack`` command line tool as follows: .. code-block:: $ openstack image list --property name=sahara-vanilla-latest-ubuntu +--------------------------------------+------------------------------+ | ID | Name | +--------------------------------------+------------------------------+ | 71b9eeac-c904-4170-866a-1f833ea614f3 | sahara-vanilla-latest-ubuntu | +--------------------------------------+------------------------------+ 4. Register the image with the sahara image registry ---------------------------------------------------- Now you will begin to interact with sahara by registering the virtual machine image in the sahara image registry. Register the image with the username ``ubuntu``. .. note:: The username will vary depending on the source image used. For more information, refer to the :doc:`registering-image` section. .. code-block:: console $ openstack dataprocessing image register sahara-vanilla-latest-ubuntu \ --username ubuntu Tag the image to inform sahara about the plugin and the version with which it shall be used. .. note:: For the steps below and the rest of this guide, substitute ```` with the appropriate version of your plugin. .. code-block:: $ openstack dataprocessing image tags add sahara-vanilla-latest-ubuntu \ --tags vanilla +-------------+--------------------------------------+ | Field | Value | +-------------+--------------------------------------+ | Description | None | | Id | 71b9eeac-c904-4170-866a-1f833ea614f3 | | Name | sahara-vanilla-latest-ubuntu | | Status | ACTIVE | | Tags | , vanilla | | Username | ubuntu | +-------------+--------------------------------------+ 5. Create node group templates ------------------------------ Node groups are the building blocks of clusters in sahara. 
Before you can begin provisioning clusters you must define a few node group templates to describe node group configurations. You can get information about available plugins with the following command: .. sourcecode:: console $ openstack dataprocessing plugin list Also you can get information about available services for a particular plugin with the ``plugin show`` command. For example: .. code-block:: $ openstack dataprocessing plugin show vanilla --plugin-version +---------------------+-----------------------------------------------------------------------------------------------------------------------+ | Field | Value | +---------------------+-----------------------------------------------------------------------------------------------------------------------+ | Description | The Apache Vanilla plugin provides the ability to launch upstream Vanilla Apache Hadoop cluster without any | | | management consoles. It can also deploy the Oozie component. | | Name | vanilla | | Required image tags | , vanilla | | Title | Vanilla Apache Hadoop | | | | | Service: | Available processes: | | | | | HDFS | datanode, namenode, secondarynamenode | | Hadoop | | | Hive | hiveserver | | JobFlow | oozie | | Spark | spark history server | | MapReduce | historyserver | | YARN | nodemanager, resourcemanager | +---------------------+-----------------------------------------------------------------------------------------------------------------------+ .. note:: These commands assume that floating IP addresses are being used. For more details on floating IP please see :ref:`floating_ip_management`. Create a master node group template with the command: .. code-block:: $ openstack dataprocessing node group template create \ --name vanilla-default-master --plugin vanilla \ --plugin-version --processes namenode resourcemanager \ --flavor 2 --auto-security-group --floating-ip-pool +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ | Auto security group | True | | Availability zone | None | | Flavor id | 2 | | Floating ip pool | dbd8d1aa-6e8e-4a35-a77b-966c901464d5 | | Id | 0f066e14-9a73-4379-bbb4-9d9347633e31 | | Is boot from volume | False | | Is default | False | | Is protected | False | | Is proxy gateway | False | | Is public | False | | Name | vanilla-default-master | | Node processes | namenode, resourcemanager | | Plugin name | vanilla | | Security groups | None | | Use autoconfig | False | | Version | | | Volumes per node | 0 | +---------------------+--------------------------------------+ Create a worker node group template with the command: .. 
code-block:: $ openstack dataprocessing node group template create \ --name vanilla-default-worker --plugin vanilla \ --plugin-version --processes datanode nodemanager \ --flavor 2 --auto-security-group --floating-ip-pool +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ | Auto security group | True | | Availability zone | None | | Flavor id | 2 | | Floating ip pool | dbd8d1aa-6e8e-4a35-a77b-966c901464d5 | | Id | 6546bf44-0590-4539-bfcb-99f8e2c11efc | | Is boot from volume | False | | Is default | False | | Is protected | False | | Is proxy gateway | False | | Is public | False | | Name | vanilla-default-worker | | Node processes | datanode, nodemanager | | Plugin name | vanilla | | Security groups | None | | Use autoconfig | False | | Version | | | Volumes per node | 0 | +---------------------+--------------------------------------+ You can also create node group templates setting a flag --boot-from-volume. This will tell the node group to boot its instances from a volume instead of the image. This feature allows for easier live migrations and improved performance. .. code-block:: $ openstack dataprocessing node group template create \ --name vanilla-default-worker --plugin vanilla \ --plugin-version --processes datanode nodemanager \ --flavor 2 --auto-security-group --floating-ip-pool \ --boot-from-volume +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ | Auto security group | True | | Availability zone | None | | Flavor id | 2 | | Floating ip pool | dbd8d1aa-6e8e-4a35-a77b-966c901464d5 | | Id | 6546bf44-0590-4539-bfcb-99f8e2c11efc | | Is boot from volume | True | | Is default | False | | Is protected | False | | Is proxy gateway | False | | Is public | False | | Name | vanilla-default-worker | | Node processes | datanode, nodemanager | | Plugin name | vanilla | | Security groups | None | | Use autoconfig | False | | Version | | | Volumes per node | 0 | +---------------------+--------------------------------------+ Alternatively you can create node group templates from JSON files: If your environment does not use floating IPs, omit defining floating IP in the template below. Sample templates can be found here: `Sample Templates `_ Create a file named ``my_master_template_create.json`` with the following content: .. code-block:: json { "plugin_name": "vanilla", "hadoop_version": "", "node_processes": [ "namenode", "resourcemanager" ], "name": "vanilla-default-master", "floating_ip_pool": "", "flavor_id": "2", "auto_security_group": true } Create a file named ``my_worker_template_create.json`` with the following content: .. code-block:: json { "plugin_name": "vanilla", "hadoop_version": "", "node_processes": [ "nodemanager", "datanode" ], "name": "vanilla-default-worker", "floating_ip_pool": "", "flavor_id": "2", "auto_security_group": true } Use the ``openstack`` client to upload the node group templates: .. code-block:: console $ openstack dataprocessing node group template create \ --json my_master_template_create.json $ openstack dataprocessing node group template create \ --json my_worker_template_create.json List the available node group templates to ensure that they have been added properly: .. 
code-block:: $ openstack dataprocessing node group template list --name vanilla-default +------------------------+--------------------------------------+-------------+--------------------+ | Name | Id | Plugin name | Version | +------------------------+--------------------------------------+-------------+--------------------+ | vanilla-default-master | 0f066e14-9a73-4379-bbb4-9d9347633e31 | vanilla | | | vanilla-default-worker | 6546bf44-0590-4539-bfcb-99f8e2c11efc | vanilla | | +------------------------+--------------------------------------+-------------+--------------------+ Remember the name or save the ID for the master and worker node group templates, as they will be used during cluster template creation. For example: * vanilla-default-master: ``0f066e14-9a73-4379-bbb4-9d9347633e31`` * vanilla-default-worker: ``6546bf44-0590-4539-bfcb-99f8e2c11efc`` 6. Create a cluster template ---------------------------- The last step before provisioning the cluster is to create a template that describes the node groups of the cluster. Create a cluster template with the command: .. code-block:: $ openstack dataprocessing cluster template create \ --name vanilla-default-cluster \ --node-groups vanilla-default-master:1 vanilla-default-worker:3 +----------------+----------------------------------------------------+ | Field | Value | +----------------+----------------------------------------------------+ | Anti affinity | | | Description | None | | Id | 9d871ebd-88a9-40af-ae3e-d8c8f292401c | | Is default | False | | Is protected | False | | Is public | False | | Name | vanilla-default-cluster | | Node groups | vanilla-default-master:1, vanilla-default-worker:3 | | Plugin name | vanilla | | Use autoconfig | False | | Version | | +----------------+----------------------------------------------------+ Alternatively you can create cluster template from JSON file: Create a file named ``my_cluster_template_create.json`` with the following content: .. code-block:: json { "plugin_name": "vanilla", "hadoop_version": "", "node_groups": [ { "name": "worker", "count": 3, "node_group_template_id": "6546bf44-0590-4539-bfcb-99f8e2c11efc" }, { "name": "master", "count": 1, "node_group_template_id": "0f066e14-9a73-4379-bbb4-9d9347633e31" } ], "name": "vanilla-default-cluster", "cluster_configs": {} } Upload the cluster template using the ``openstack`` command line tool: .. sourcecode:: console $ openstack dataprocessing cluster template create --json my_cluster_template_create.json Remember the cluster template name or save the cluster template ID for use in the cluster provisioning command. The cluster ID can be found in the output of the creation command or by listing the cluster templates as follows: .. code-block:: $ openstack dataprocessing cluster template list --name vanilla-default +-------------------------+--------------------------------------+-------------+--------------------+ | Name | Id | Plugin name | Version | +-------------------------+--------------------------------------+-------------+--------------------+ | vanilla-default-cluster | 9d871ebd-88a9-40af-ae3e-d8c8f292401c | vanilla | | +-------------------------+--------------------------------------+-------------+--------------------+ 7. Create cluster ----------------- Now you are ready to provision the cluster. This step requires a few pieces of information that can be found by querying various OpenStack services. Create a cluster with the command: .. 
code-block:: $ openstack dataprocessing cluster create --name my-cluster-1 \ --cluster-template vanilla-default-cluster --user-keypair my_stack \ --neutron-network private --image sahara-vanilla-latest-ubuntu +----------------------------+----------------------------------------------------+ | Field | Value | +----------------------------+----------------------------------------------------+ | Anti affinity | | | Cluster template id | 9d871ebd-88a9-40af-ae3e-d8c8f292401c | | Description | | | Id | 1f0dc6f7-6600-495f-8f3a-8ac08cdb3afc | | Image | 71b9eeac-c904-4170-866a-1f833ea614f3 | | Is protected | False | | Is public | False | | Is transient | False | | Name | my-cluster-1 | | Neutron management network | fabe9dae-6fbd-47ca-9eb1-1543de325efc | | Node groups | vanilla-default-master:1, vanilla-default-worker:3 | | Plugin name | vanilla | | Status | Validating | | Use autoconfig | False | | User keypair id | my_stack | | Version | | +----------------------------+----------------------------------------------------+ Alternatively you can create a cluster template from a JSON file: Create a file named ``my_cluster_create.json`` with the following content: .. code-block:: json { "name": "my-cluster-1", "plugin_name": "vanilla", "hadoop_version": "", "cluster_template_id" : "9d871ebd-88a9-40af-ae3e-d8c8f292401c", "user_keypair_id": "my_stack", "default_image_id": "71b9eeac-c904-4170-866a-1f833ea614f3", "neutron_management_network": "fabe9dae-6fbd-47ca-9eb1-1543de325efc" } The parameter ``user_keypair_id`` with the value ``my_stack`` is generated by creating a keypair. You can create your own keypair in the OpenStack Dashboard, or through the ``openstack`` command line client as follows: .. sourcecode:: console $ openstack keypair create my_stack --public-key $PATH_TO_PUBLIC_KEY If sahara is configured to use neutron for networking, you will also need to include the ``--neutron-network`` argument in the ``cluster create`` command or the ``neutron_management_network`` parameter in ``my_cluster_create.json``. If your environment does not use neutron, you should omit these arguments. You can determine the neutron network id with the following command: .. sourcecode:: console $ openstack network list Create and start the cluster: .. sourcecode:: console $ openstack dataprocessing cluster create --json my_cluster_create.json Verify the cluster status by using the ``openstack`` command line tool as follows: .. code-block:: $ openstack dataprocessing cluster show my-cluster-1 -c Status +--------+--------+ | Field | Value | +--------+--------+ | Status | Active | +--------+--------+ The cluster creation operation may take several minutes to complete. During this time the "status" returned from the previous command may show states other than ``Active``. A cluster also can be created with the ``wait`` flag. In that case the cluster creation command will not be finished until the cluster is moved to the ``Active`` state. 8. Run a MapReduce job to check Hadoop installation --------------------------------------------------- Check that your Hadoop installation is working properly by running an example job on the cluster manually. * Login to the NameNode (usually the master node) via ssh with the ssh-key used above: .. sourcecode:: console $ ssh -i my_stack.pem ubuntu@ * Switch to the hadoop user: .. sourcecode:: console $ sudo su hadoop * Go to the shared hadoop directory and run the simplest MapReduce example: .. 
sourcecode:: console $ cd /opt/hadoop-/share/hadoop/mapreduce $ /opt/hadoop-/bin/hadoop jar hadoop-mapreduce-examples-.jar pi 10 100 Congratulations! Your Hadoop cluster is ready to use, running on your OpenStack cloud. Elastic Data Processing (EDP) ============================= Job Binaries are the entities you define/upload the source code (mains and libraries) for your job. First you need to download your binary file or script to swift container and register your file in Sahara with the command: .. code:: bash (openstack) dataprocessing job binary create --url "swift://integration.sahara/hive.sql" \ --username username --password password --description "My first job binary" hive-binary Data Sources ------------ Data Sources are entities where the input and output from your jobs are housed. You can create data sources which are related to Swift, Manila or HDFS. You need to set the type of data source (swift, hdfs, manila, maprfs), name and url. The next two commands will create input and output data sources in swift. .. code:: bash $ openstack dataprocessing data source create --type swift --username admin --password admin \ --url "swift://integration.sahara/input.txt" input $ openstack dataprocessing data source create --type swift --username admin --password admin \ --url "swift://integration.sahara/output.txt" output If you want to create data sources in hdfs, use valid hdfs urls: .. code:: bash $ openstack dataprocessing data source create --type hdfs --url "hdfs://tmp/input.txt" input $ openstack dataprocessing data source create --type hdfs --url "hdfs://tmp/output.txt" output Job Templates (Jobs in API) --------------------------- In this step you need to create a job template. You have to set the type of the job template using the `type` parameter. Choose the main library using the job binary which was created in the previous step and set a name for the job template. Example of the command: .. code:: bash $ openstack dataprocessing job template create --type Hive \ --name hive-job-template --main hive-binary Jobs (Job Executions in API) ---------------------------- This is the last step in our guide. In this step you need to launch your job. You need to pass the following arguments: * The name or ID of input/output data sources for the job * The name or ID of the job template * The name or ID of the cluster on which to run the job For instance: .. code:: bash $ openstack dataprocessing job execute --input input --output output \ --job-template hive-job-template --cluster my-first-cluster You can check status of your job with the command: .. code:: bash $ openstack dataprocessing job show Once the job is marked as successful you can check the output data source. It will contain the output data of this job. Congratulations! ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/user/registering-image.rst0000664000175000017500000000207500000000000022243 0ustar00zuulzuul00000000000000Registering an Image ==================== Sahara deploys a cluster of machines using images stored in Glance. Each plugin has its own requirements on the image contents (see specific plugin documentation for details). Two general requirements for an image are to have the cloud-init and the ssh-server packages installed. Sahara requires the images to be registered in the Sahara Image Registry. A registered image must have two properties set: * username - a name of the default cloud-init user. 
* tags - certain tags mark image to be suitable for certain plugins. The tags depend on the plugin used, you can find required tags in the plugin's documentations. The default username specified for these images is different for each distribution: +--------------+------------+ | OS | username | +==============+============+ | Ubuntu 14.04 | ubuntu | +--------------+------------+ | Ubuntu 16.04 | ubuntu | +--------------+------------+ | Fedora | fedora | +--------------+------------+ | CentOS 7.x | centos | +--------------+------------+ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/user/sahara-on-ironic.rst0000664000175000017500000000656400000000000022002 0ustar00zuulzuul00000000000000How to run a Sahara cluster on bare metal servers ================================================= Hadoop clusters are designed to store and analyze extremely large amounts of unstructured data in distributed computing environments. Sahara enables you to boot Hadoop clusters in both virtual and bare metal environments. When Booting Hadoop clusters with Sahara on bare metal servers, you benefit from the bare metal performance with self-service resource provisioning. 1. Create a new OpenStack environment using Devstack as described in the :devstack-doc:`Devstack Guide <>` 2. Install Ironic as described in the :ironic-doc:`Ironic Installation Guide ` 3. Install Sahara as described in the `Sahara Installation Guide <../install/installation-guide.html>`_ 4. Build the Sahara image and prepare it for uploading to Glance: - Build an image for Sahara plugin which supports baremetal deployment. Refer to the :ref:`building-baremetal-images-label` section. - Convert the qcow2 image format to the raw format. For example: .. sourcecode:: console $ qemu-img convert -O raw image-converted.qcow image-converted-from-qcow2.raw .. - Mount the raw image to the system. - ``chroot`` to the mounted directory and remove the installed grub. - Build grub2 from sources and install to ``/usr/sbin``. - In ``/etc/sysconfig/selinux``, disable selinux ``SELINUX=disabled`` - In the configuration file, set ``onboot=yes`` and ``BOOTPROTO=dhcp`` for every interface. - Add the configuration files for all interfaces in the ``/etc/sysconfig/network-scripts`` directory. 5. Upload the Sahara disk image to Glance, and register it in the Sahara Image Registry. Referencing its separate kernel and initramfs images. 6. Configure the bare metal network for the Sahara cluster nodes: - Add bare metal servers to your environment manually referencing their IPMI addresses (Ironic does not detect servers), for Ironic to manage the servers power and network. Also, configure the scheduling information and add the required flavors. Please check the :ironic-doc:`Enrollment section of the Ironic installation guide `. 7. Launch your Sahara cluster on Ironic from the cluster template: * Log in to Horizon. * Go to Data Processing > Node Group Templates. * Find the templates that belong to the plugin you would like to use * Update those templates to use 'bare metal' flavor instead of the default one * Go to Data Processing > Cluster Templates. * Click Launch Cluster. * On the Launch Cluster dialog: * Specify the bare metal network for cluster nodes The cluster provisioning time is slower compared to the cluster provisioning of the same size that runs on VMs. 
Ironic does real hardware reports which is time consuming, and the whole root disk is filled from ``/dev/zero`` for security reasons. Known limitations: ------------------ * Security groups are not applied. * Nodes are not isolated by projects. * VM to Bare Metal network routing is not allowed. * The user has to specify the count of ironic nodes before Devstack deploys an OpenStack. * The user cannot use the same image for several ironic node types. For example, if there are 3 ironic node types, the user has to create 3 images and 3 flavors. * Multiple interfaces on a single node are not supported. Devstack configures only one interface. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/source/user/statuses.rst0000664000175000017500000001222500000000000020512 0ustar00zuulzuul00000000000000Sahara Cluster Statuses Overview ================================ All Sahara Cluster operations are performed in multiple steps. A Cluster object has a ``Status`` attribute which changes when Sahara finishes one step of operations and starts another one. Also a Cluster object has a ``Status description`` attribute which changes whenever Cluster errors occur. Sahara supports three types of Cluster operations: * Create a new Cluster * Scale/Shrink an existing Cluster * Delete an existing Cluster Creating a new Cluster ---------------------- 1. Validating ~~~~~~~~~~~~~ Before performing any operations with OpenStack environment, Sahara validates user input. There are two types of validations, that are done: * Check that a request contains all necessary fields and that the request does not violate any constraints like unique naming, etc. * Plugin check (optional). The provisioning Plugin may also perform any specific checks like a Cluster topology validation check. If any of the validations fails during creating, the Cluster object will still be kept in the database with an ``Error`` status. If any validations fails during scaling the ``Active`` Cluster, it will be kept with an ``Active`` status. In both cases status description will contain error messages about the reasons of failure. 2. InfraUpdating ~~~~~~~~~~~~~~~~ This status means that the Provisioning plugin is performing some infrastructure updates. 3. Spawning ~~~~~~~~~~~ Sahara sends requests to OpenStack for all resources to be created: * VMs * Volumes * Floating IPs (if Sahara is configured to use Floating IPs) It takes some time for OpenStack to schedule all the required VMs and Volumes, so sahara will wait until all of the VMs are in an ``Active`` state. 4. Waiting ~~~~~~~~~~ Sahara waits while VMs' operating systems boot up and all internal infrastructure components like networks and volumes are attached and ready to use. 5. Preparing ~~~~~~~~~~~~ Sahara prepares a Cluster for starting. This step includes generating the ``/etc/hosts`` file or changing ``/etc/resolv.conf`` file (if you use Designate service), so that all instances can access each other by a hostname. Also Sahara updates the ``authorized_keys`` file on each VM, so that VMs can communicate without passwords. 6. Configuring ~~~~~~~~~~~~~~ Sahara pushes service configurations to VMs. Both XML and JSON based configurations and environmental variables are set on this step. 7. Starting ~~~~~~~~~~~ Sahara is starting Hadoop services on Cluster's VMs. 8. Active ~~~~~~~~~ Active status means that a Cluster has started successfully and is ready to run EDP Jobs. 
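While a cluster is being created, these transitions can be observed from the command line. The snippet below is only a minimal illustration that reuses the ``openstack dataprocessing cluster show`` command from the quickstart guide; ``my-cluster-1`` is a placeholder for the name of your own cluster.

.. code-block:: console

    # Re-check the cluster status every 30 seconds; interrupt with Ctrl+C
    # once it reports Active (or Error).
    $ while true; do openstack dataprocessing cluster show my-cluster-1 -c Status; sleep 30; done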
Scaling/Shrinking an existing Cluster ------------------------------------- 1. Validating ~~~~~~~~~~~~~ Sahara checks the scale/shrink request for validity. The Plugin method called for performing Plugin specific checks is different from the validation method in creation. 2. Scaling ~~~~~~~~~~ Sahara performs database operations updating all affected existing Node Groups and creating new ones to join the existing Node Groups. 3. Adding Instances ~~~~~~~~~~~~~~~~~~~ Status is similar to ``Spawning`` in Cluster creation. Sahara adds required amount of VMs to the existing Node Groups and creates new Node Groups. 4. Configuring ~~~~~~~~~~~~~~ Status is similar to ``Configuring`` in Cluster creation. New instances are being configured in the same manner as already existing ones. The VMs in the existing Cluster are also updated with a new ``/etc/hosts`` file or ``/etc/resolv.conf`` file. 5. Decommissioning ~~~~~~~~~~~~~~~~~~ Sahara stops Hadoop services on VMs that will be deleted from a Cluster. Decommissioning a Data Node may take some time because Hadoop rearranges data replicas around the Cluster, so that no data will be lost after that Data Node is deleted. 6. Deleting Instances ~~~~~~~~~~~~~~~~~~~~~ Sahara sends requests to OpenStack to release unneeded resources: * VMs * Volumes * Floating IPs (if they are used) 7. Active ~~~~~~~~~ The same ``Active`` status as after Cluster creation. Deleting an existing Cluster ---------------------------- 1. Deleting ~~~~~~~~~~~ The only step, that releases all Cluster's resources and removes it from the database. 2. Force Deleting ~~~~~~~~~~~~~~~~~ In extreme cases the regular "Deleting" step may hang. Sahara APIv2 introduces the ability to force delete a Cluster. This prevents deleting from hanging but comes with the risk of orphaned resources. Error State ----------- If the Cluster creation fails, the Cluster will enter the ``Error`` state. This status means the Cluster may not be able to perform any operations normally. This cluster will stay in the database until it is manually deleted. The reason for failure may be found in the sahara logs. Also, the status description will contain information about the error. If an error occurs during the ``Adding Instances`` operation, Sahara will first try to rollback this operation. If a rollback is impossible or fails itself, then the Cluster will also go into an ``Error`` state. If a rollback was successful, Cluster will get into an ``Active`` state and status description will contain a short message about the reason of ``Adding Instances`` failure. 
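As a sketch of the recovery steps described above, assuming a cluster named ``my-cluster-1`` has entered the ``Error`` state, the status description can be read from the full ``show`` output and the cluster can then be removed manually:

.. code-block:: console

    # The full output includes the status description explaining the failure
    # (exact field names may vary slightly between client versions).
    $ openstack dataprocessing cluster show my-cluster-1

    # After investigating (see also the sahara logs), delete the cluster.
    $ openstack dataprocessing cluster delete my-cluster-1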
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.657891 sahara-16.0.0/doc/test/0000775000175000017500000000000000000000000014604 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/doc/test/redirect-tests.txt0000664000175000017500000000166300000000000020314 0ustar00zuulzuul00000000000000/sahara/pike/contributor/launchpad.html 301 /sahara/pike/contributor/project.html /sahara/queens/contributor/launchpad.html 301 /sahara/queens/contributor/project.html /sahara/latest/contributor/launchpad.html 301 /sahara/latest/contributor/project.html /sahara/latest/user/vanilla-imagebuilder.html 301 /sahara/latest/user/vanilla-plugin.html /sahara/latest/user/cdh-imagebuilder.html 301 /sahara/latest/user/cdh-plugin.html /sahara/latest/user/guest-requirements.html 301 /sahara/latest/user/building-guest-images.html /sahara/rocky/user/guest-requirements.html 301 /sahara/rocky/user/building-guest-images.html /sahara/latest/user/vanilla-plugin.html 301 /sahara-plugin-vanilla/latest/ /sahara/stein/user/storm-plugin.html 301 /sahara-plugin-storm/stein/ /sahara/latest/contributor/how-to-participate.html 301 /sahara/latest/contributor/contributing.html /sahara/latest/contributor/project.html 301 /sahara/latest/contributor/contributing.html ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.593891 sahara-16.0.0/etc/0000775000175000017500000000000000000000000013633 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.657891 sahara-16.0.0/etc/edp-examples/0000775000175000017500000000000000000000000016217 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/etc/edp-examples/README.rst0000664000175000017500000000027300000000000017710 0ustar00zuulzuul00000000000000===================== Sahara files for EDP ===================== All files from this directory have been moved to new sahara-tests repository: https://opendev.org/openstack/sahara-tests ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.657891 sahara-16.0.0/etc/sahara/0000775000175000017500000000000000000000000015072 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/etc/sahara/README-sahara.conf.txt0000664000175000017500000000020100000000000020742 0ustar00zuulzuul00000000000000To generate the sample sahara.conf file, run the following command from the top level of the sahara directory: tox -e genconfig ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/etc/sahara/api-paste.ini0000664000175000017500000000265500000000000017466 0ustar00zuulzuul00000000000000[pipeline:sahara] pipeline = cors http_proxy_to_wsgi request_id versions acl auth_validator sahara_api [composite:sahara_api] use = egg:Paste#urlmap /healthcheck: healthcheck /: sahara_apiv2 # this app is given as a reference for v1-only deployments # [app:sahara_apiv11] # paste.app_factory = sahara.api.middleware.sahara_middleware:Router.factory [app:sahara_apiv2] paste.app_factory = sahara.api.middleware.sahara_middleware:RouterV2.factory [filter:cors] paste.filter_factory = 
oslo_middleware.cors:filter_factory oslo_config_project = sahara [filter:request_id] paste.filter_factory = oslo_middleware.request_id:RequestId.factory [filter:acl] paste.filter_factory = keystonemiddleware.auth_token:filter_factory [filter:auth_validator] paste.filter_factory = sahara.api.middleware.auth_valid:AuthValidator.factory [filter:debug] paste.filter_factory = oslo_middleware.debug:Debug.factory [filter:http_proxy_to_wsgi] paste.filter_factory = oslo_middleware:HTTPProxyToWSGI.factory [filter:versions] paste.filter_factory = sahara.api.middleware.version_discovery:VersionResponseMiddlewareV2.factory # this filter is given as a reference for v1-only deployments #[filter:versions] #paste.filter_factory = sahara.api.middleware.version_discovery:VersionResponseMiddlewareV1.factory [app:healthcheck] paste.app_factory = oslo_middleware:Healthcheck.app_factory backends = disable_by_file disable_by_file_path = /etc/sahara/healthcheck_disable ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/etc/sahara/compute.topology.sample0000664000175000017500000000016600000000000021627 0ustar00zuulzuul00000000000000edp-master-0001 /rack1 10.50.0.8 /rack1 edp-slave-0002 /rack1 10.50.0.5 /rack1 edp-slave-0001 /rack2 10.50.0.6 /rack2 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/etc/sahara/rootwrap.conf0000664000175000017500000000220600000000000017616 0ustar00zuulzuul00000000000000# Configuration for sahara-rootwrap # This file should be owned by (and only-writable by) the root user [DEFAULT] # List of directories to load filter definitions from (separated by ','). # These directories MUST all be only writable by root ! filters_path=/etc/sahara/rootwrap.d,/usr/share/sahara/rootwrap # List of directories to search executables in, in case filters do not # explicitely specify a full path (separated by ',') # If not specified, defaults to system PATH environment variable. # These directories MUST all be only writable by root ! exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin # Enable logging to syslog # Default value is False use_syslog=False # Which syslog facility to use. # Valid values include auth, authpriv, syslog, local0, local1... # Default value is 'syslog' syslog_log_facility=syslog # Which messages to log. # INFO means log all usage # ERROR means only log unsuccessful attempts syslog_log_level=ERROR [xenapi] # XenAPI configuration is only required by the L2 agent if it is to # target a XenServer/XCP compute host's dom0. 
xenapi_connection_url= xenapi_connection_username=root xenapi_connection_password= ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.657891 sahara-16.0.0/etc/sahara/rootwrap.d/0000775000175000017500000000000000000000000017171 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/etc/sahara/rootwrap.d/sahara.filters0000664000175000017500000000014600000000000022023 0ustar00zuulzuul00000000000000[Filters] ip: IpNetnsExecFilter, ip, root nc: CommandFilter, nc, root kill: CommandFilter, kill, root ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/etc/sahara/swift.topology.sample0000664000175000017500000000003700000000000021304 0ustar00zuulzuul0000000000000010.10.1.86 /rack1 swift1 /rack1././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.657891 sahara-16.0.0/etc/sudoers.d/0000775000175000017500000000000000000000000015541 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/etc/sudoers.d/sahara-rootwrap0000664000175000017500000000012100000000000020570 0ustar00zuulzuul00000000000000sahara ALL=(root) NOPASSWD: /usr/bin/sahara-rootwrap /etc/sahara/rootwrap.conf * ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/lower-constraints.txt0000664000175000017500000000533700000000000017326 0ustar00zuulzuul00000000000000alabaster==0.7.10 alembic==0.9.6 appdirs==1.4.3 asn1crypto==0.24.0 astroid==1.3.8 bandit==1.1.0 bashate==0.5.1 bcrypt==3.1.4 botocore==1.5.1 cachetools==2.0.1 castellan==0.16.0 certifi==2018.1.18 cffi==1.14.0 chardet==3.0.4 click==6.7 cliff==2.11.0 cmd2==0.8.1 contextlib2==0.5.5 coverage==4.0 cryptography==2.5 debtcollector==1.19.0 decorator==4.2.1 deprecation==2.0 doc8==0.6.0 docutils==0.14 dogpile.cache==0.6.5 dulwich==0.19.0 enum-compat==0.0.2 eventlet==0.26.0 extras==1.0.0 fasteners==0.14.1 fixtures==3.0.0 Flask==1.0.2 future==0.16.0 futurist==1.6.0 gitdb2==2.0.3 GitPython==2.1.8 greenlet==0.4.16 idna==2.6 imagesize==1.0.0 iso8601==0.1.11 itsdangerous==0.24 Jinja2==2.10 jmespath==0.9.3 jsonpatch==1.21 jsonpointer==2.0 jsonschema==3.2.0 keystoneauth1==3.4.0 keystonemiddleware==4.17.0 kombu==5.0.1 linecache2==1.0.0 logilab-common==1.4.1 Mako==1.0.7 MarkupSafe==1.1.0 microversion-parse==0.2.1 monotonic==1.4 mox3==0.25.0 msgpack==0.5.6 munch==2.2.0 netaddr==0.7.19 netifaces==0.10.6 openstackdocstheme==1.20.0 openstacksdk==0.12.0 os-api-ref==1.6.0 os-client-config==1.29.0 os-service-types==1.2.0 osc-lib==1.10.0 oslo.cache==1.29.0 oslo.concurrency==3.26.0 oslo.config==6.8.0 oslo.context==2.22.0 oslo.db==6.0.0 oslo.i18n==3.15.3 oslo.log==3.36.0 oslo.messaging==10.2.0 oslo.middleware==3.31.0 oslo.policy==3.6.0 oslo.rootwrap==5.8.0 oslo.serialization==2.18.0 oslo.service==1.31.0 oslo.upgradecheck==1.3.0 oslo.utils==4.5.0 oslotest==3.2.0 packaging==20.4 paramiko==2.7.1 Paste==2.0.3 PasteDeploy==1.5.2 pbr==2.0.0 pika-pool==0.1.3 pika==0.10.0 prettytable==0.7.2 psycopg2==2.8.0 pyasn1==0.4.2 pycadf==2.7.0 pycparser==2.18 Pygments==2.2.0 pyinotify==0.9.6 pylint==1.4.5 PyMySQL==0.8.0 PyNaCl==1.2.1 pyOpenSSL==17.5.0 pyparsing==2.2.0 pyperclip==1.6.0 python-barbicanclient==4.6.0 python-cinderclient==3.3.0 python-dateutil==2.7.0 python-editor==1.0.3 
python-glanceclient==2.8.0 python-heatclient==1.10.0 python-keystoneclient==3.8.0 python-manilaclient==1.16.0 python-mimeparse==1.6.0 python-neutronclient==6.7.0 python-novaclient==9.1.0 python-openstackclient==3.14.0 python-saharaclient==1.4.0 python-subunit==1.4.0 python-swiftclient==3.2.0 pytz==2018.3 PyYAML==5.1 reno==2.5.0 repoze.lru==0.7 requests==2.23.0 requestsexceptions==1.4.0 restructuredtext-lint==1.1.3 rfc3986==1.2.0 Routes==2.4.1 simplejson==3.13.2 smmap2==2.0.3 snowballstemmer==1.2.1 Sphinx==1.6.2 sphinxcontrib-httpdomain==1.3.0 sphinxcontrib-websupport==1.0.1 sqlalchemy-migrate==0.13.0 SQLAlchemy==1.0.10 sqlparse==0.2.4 statsd==3.2.2 stestr==1.0.0 stevedore==1.20.0 Tempita==0.5.2 tenacity==6.1.0 testresources==2.0.0 testscenarios==0.4 testtools==2.4.0 tooz==1.58.0 traceback2==1.4.0 unittest2==1.1.0 urllib3==1.22 vine==1.1.4 voluptuous==0.11.1 WebOb==1.7.1 Werkzeug==0.14.1 wrapt==1.10.11 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.593891 sahara-16.0.0/playbooks/0000775000175000017500000000000000000000000015063 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.657891 sahara-16.0.0/playbooks/buildimages/0000775000175000017500000000000000000000000017350 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/playbooks/buildimages/run.yaml0000664000175000017500000000024200000000000021036 0ustar00zuulzuul00000000000000- hosts: all roles: - role: bindep bindep_profile: test bindep_dir: "{{ sahara_src_dir|default(zuul_work_dir) }}" - build-sahara-images-cli ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/pylintrc0000664000175000017500000000304100000000000014645 0ustar00zuulzuul00000000000000# The format of this file isn't really documented; just use --generate-rcfile [MASTER] # Add to the black list. It should be a base name, not a # path. ignore=openstack [Messages Control] # NOTE(justinsb): We might want to have a 2nd strict pylintrc in future # C0111: Don't require docstrings on every method # W0511: TODOs in code comments are fine. # W0142: *args and **kwargs are fine. # W0622: Redefining id is fine. # W0703: Catch "Exception". disable=C0111,W0511,W0142,W0622,W0703 [Basic] # Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ # Argument names can be 2 to 31 characters long, with lowercase and underscores argument-rgx=[a-z_][a-z0-9_]{1,30}$ # Type attributes names can be 2 to 31 characters long, with lowercase and underscores attr-rgx=[a-z_][a-z0-9_]{1,30}$ # Method names shold be at least 3 characters long and be lowercased with underscores method-rgx=([a-z_][a-z0-9_]{1,30}|setUp|tearDown)$ # Module names matching sahara-* are ok (files in bin/) module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(sahara-[a-z0-9_-]+))$ # Don't require docstrings on tests. no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [Design] max-public-methods=100 min-public-methods=0 max-args=6 [Variables] # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. 
# _ is used by our localization additional-builtins=_ [TYPECHECK] generated-members=query,node_template,status_code,data ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.593891 sahara-16.0.0/releasenotes/0000775000175000017500000000000000000000000015551 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.677891 sahara-16.0.0/releasenotes/notes/0000775000175000017500000000000000000000000016701 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/.placeholder0000664000175000017500000000000000000000000021152 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/add-impala-2.2-c1649599649aff5c.yaml0000664000175000017500000000006000000000000024234 0ustar00zuulzuul00000000000000--- features: - Add impala 2.2 to MapR plugin ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/add-mapr-520-3ed6cd0ae9688e17.yaml0000664000175000017500000000007100000000000024064 0ustar00zuulzuul00000000000000--- features: - MaR 5.2.0 is supported in MapR plugin. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/add-mapr-kafka-3a808bbc1aa21055.yaml0000664000175000017500000000005300000000000024513 0ustar00zuulzuul00000000000000--- features: - Add Kafka to MapR plugin ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/add-mapr-sentry-6012c08b55d679de.yaml0000664000175000017500000000005400000000000024733 0ustar00zuulzuul00000000000000--- features: - Add Sentry to MapR plugin ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/add-scheduler-edp-job-9eda17dd174e53fa.yaml0000664000175000017500000000010000000000000026156 0ustar00zuulzuul00000000000000--- features: - Add ability of scheduling EDP jobs for sahara ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/add-storm-version-1_1_0-3e10b34824706a62.yaml0000664000175000017500000000007400000000000025723 0ustar00zuulzuul00000000000000--- features: - Storm 1.1.0 is supported in Storm plugin. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/add-upgrade-check-framework-9cd18dbc47b0efbd.yaml0000664000175000017500000000072300000000000027527 0ustar00zuulzuul00000000000000--- prelude: > Added new tool ``sahara-status upgrade check``. features: - | New framework for ``sahara-status upgrade check`` command is added. This framework allows adding various checks which can be run before a Sahara upgrade to ensure if the upgrade can be performed safely. upgrade: - | Operator can now use new CLI tool ``sahara-status upgrade check`` to check if Sahara deployment can be safely upgraded from N-1 to N release. 
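The ``sahara-status upgrade check`` command described in the release note above is meant to be run before an upgrade; a minimal sketch, assuming it is executed on the node hosting the Sahara services (the command name is taken from the note, the invocation context is an assumption)::

    # run the pre-upgrade safety checks before moving from release N-1 to N
    $ sahara-status upgrade check

The exact table of check results printed depends on the oslo.upgradecheck framework version in use.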
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/add-wsgi-server-support-c8fbc3d76d4e42f6.yaml0000664000175000017500000000020300000000000026663 0ustar00zuulzuul00000000000000--- features: - Added support of running sahara-api as wsgi application. Use 'sahara-wsgi-api' command for use this feature. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/add_kafka_in_cdh-774c7c051480c892.yaml0000664000175000017500000000007200000000000024761 0ustar00zuulzuul00000000000000--- features: - Kafka was added in CDH 5.5 and CDH 5.7 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/add_mapr_repo_configs-04af1a67350bfd24.yaml0000664000175000017500000000015400000000000026272 0ustar00zuulzuul00000000000000--- features: - MapR repositories now can be configured in general section of cluster template configs ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/ambari-agent-pkg-install-timeout-param-d50e5c15e06fa51e.yaml0000664000175000017500000000016300000000000031407 0ustar00zuulzuul00000000000000--- features: - Adding the ability to change default timeout parameter for ambari agent package installation ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/ambari-downscaling-b9ba759ce9c7325e.yaml0000664000175000017500000000007600000000000025643 0ustar00zuulzuul00000000000000--- fixes: - Fixed incorrect down scaling of ambari cluster ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/ambari-hive-92b911e0a759ee88.yaml0000664000175000017500000000007300000000000024123 0ustar00zuulzuul00000000000000--- fixes: - Fixed launching Hive jobs in Ambari plugin. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/ambari-server-start-856403bc280dfba3.yaml0000664000175000017500000000007600000000000025670 0ustar00zuulzuul00000000000000--- fixes: - Starting Ambari clusters on Centos 7 is fixed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/ambari26-image-pack-88c9aad59bf635b2.yaml0000664000175000017500000000012600000000000025472 0ustar00zuulzuul00000000000000--- features: - Adding the ability to create Ambari 2.6 images on sahara-image-pack ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/ambari_2_4_image_generation_validation-47eabb9fa90384c8.yaml0000664000175000017500000000024000000000000031554 0ustar00zuulzuul00000000000000--- features: - Enables the creation and validation of Ambari 2.4 images using the new image generation process where libguestfs replaces the use of DIB. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/api-insecure-cbd4fd5da71b29a3.yaml0000664000175000017500000000011400000000000024574 0ustar00zuulzuul00000000000000--- fixes: - Fixed api_insecure handling in sessions. Closed bug 1539498. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/api-v2-return-payload-a84a609db410228a.yaml0000664000175000017500000000020400000000000025745 0ustar00zuulzuul00000000000000--- other: - As part of the APIv2 work we changed all tenant_id references to project_id on the return payload of REST calls. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/apiv2-microversion-4c1a58ee8090e5a9.yaml0000664000175000017500000000025400000000000025546 0ustar00zuulzuul00000000000000--- features: - | Users of Sahara's APIv2 may request a microversion of that API, with "OpenStack-API-Version: data-processing [version]" in the request headers. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/apiv2-payload-tweaks-b73c20a35263d958.yaml0000664000175000017500000000073600000000000025613 0ustar00zuulzuul00000000000000--- other: - A few responses in the experimental (but nearly-stable) APIv2 have been tweaked. To be specific, the key `hadoop_version` has been replaced with `plugin_version`, the key `job` has been replaced with `job_template`, the key `job_execution` has been replaced with `job`, and the key `oozie_job_id` has been replaced with `engine_job_id`. In fact, these changes were all previously partially implemented, and are now completely implemented. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/apiv2-preview-release-b1ee8cc9b2fb01da.yaml0000664000175000017500000000063000000000000026405 0ustar00zuulzuul00000000000000--- features: - | Sahara's APIv2 is now exposed by default (although its state is still experimental). It has feature parity with Sahara's APIv1.1, but APIv2 brings better REST semantics, tweaks to some response payloads, and some other improvements. APIv2 will remain labeled experimental until it is stabilized following the addition of new features to it in the coming cycle(s). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/apiv2-stable-release-25ba9920c8e4632a.yaml0000664000175000017500000000013000000000000025621 0ustar00zuulzuul00000000000000--- prelude: > - Sahara's APIv2 is now considered stable, and no longer experimental. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/auto_configs_for_hdp-011d460d37dcdf02.yaml0000664000175000017500000000020600000000000026142 0ustar00zuulzuul00000000000000--- features: - Add ability to automatically generate better configurations for Ambari cluster by using 'ALWAYS_APPLY' strategy ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/boot-from-volume-e7078452fac1a4a0.yaml0000664000175000017500000000013600000000000025202 0ustar00zuulzuul00000000000000--- features: - Adding the ability to boot a Sahara cluster from volumes instead of images. 
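As a hedged illustration of the APIv2 microversion negotiation mentioned above, a client might send the ``OpenStack-API-Version`` header on any data-processing request; the header name comes from the note, while the endpoint URL, port (8386 is the usual Sahara API port) and the requested version value are assumptions::

    $ curl -H "X-Auth-Token: $TOKEN" \
           -H "OpenStack-API-Version: data-processing 2.0" \
           http://controller:8386/v2/clusters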
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/ca-cert-fix-5c434a82f9347039.yaml0000664000175000017500000000015500000000000023674 0ustar00zuulzuul00000000000000--- fixes: - CA certificate handling in keystone, nova, neutron and cinder clients are fixed (#330635) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/cdh-5-5-35e582e149a05632.yaml0000664000175000017500000000007000000000000022621 0ustar00zuulzuul00000000000000--- features: - CDH 5.5.0 is supported in CDH plugin. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/cdh-513-bdce0d5d269d8f20.yaml0000664000175000017500000000007600000000000023211 0ustar00zuulzuul00000000000000--- features: - Adding support to CDH 5.13.0 in CDH plugin. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/cdh-labels-5695d95bce226051.yaml0000664000175000017500000000036100000000000023645 0ustar00zuulzuul00000000000000--- features: - Versions 5.5.0 and 5.7.0 of Cloudera plugin are declared as stable. deprecations: - Versions 5, 5.3.0, 5.4.0 of Cloudera plugin are deprecated. It is no longer maintainted and supposed to be removed in P release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/cdh_5_11_0_image_generation_validation-6334ef6d04950935.yaml0000664000175000017500000000024000000000000031057 0ustar00zuulzuul00000000000000--- features: - Enables the creation and validation of CDH 5.11.0 images using the new image generation process where libguestfs replaces the use of DIB. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/cdh_5_11_support-10d4abb91bc4475f.yaml0000664000175000017500000000007100000000000025126 0ustar00zuulzuul00000000000000--- features: - CDH 5.11.0 is supported in CDH plugin. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/cdh_5_7_image_generation_validation-308e7529a9018663.yaml0000664000175000017500000000023700000000000030514 0ustar00zuulzuul00000000000000--- features: - Enables the creation and validation of CDH 5.7.0 images using the new image generation process where libguestfs replaces the use of DIB. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/cdh_5_7_support-9522cb9b4dce2378.yaml0000664000175000017500000000006600000000000025016 0ustar00zuulzuul00000000000000--- features: - CDH 5.7 is supported in CDH plugin. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/cdh_5_9_0_image_generation_validation-19d10e6468e30b4f.yaml0000664000175000017500000000024000000000000031136 0ustar00zuulzuul00000000000000--- features: - Enables the creation and validation of CDH 5.9.0 images using the new image generation process where libguestfs replaces the use of DIB. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/cdh_5_9_support-b603a2648b2e7b32.yaml0000664000175000017500000000007000000000000024714 0ustar00zuulzuul00000000000000--- features: - CDH 5.9.0 is supported in CDH plugin. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/config-groups-ambari-837de6d33eb0fa87.yaml0000664000175000017500000000015700000000000026110 0ustar00zuulzuul00000000000000--- fixes: - After decommissioning hosts all assoicated configs groups will be removed in ambari plugin. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/consolidate-cluster-creation-apiv2-5d5aceeb2e97c702.yaml0000664000175000017500000000033500000000000030750 0ustar00zuulzuul00000000000000--- features: - The experimental APIv2 supports simultaneous creation of multiple clusters only through POST /v2/clusters (using the `count` parameter). The POST /v2/clusters/multiple endpoint has been removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/convert-to-cluster-template-43d502496d18625e.yaml0000664000175000017500000000014500000000000027156 0ustar00zuulzuul00000000000000--- deprecations: - Convert to cluster template feature is no longer supported by all plugins. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/deprecate-cdh_5_5-0da56b562170566f.yaml0000664000175000017500000000010200000000000024771 0ustar00zuulzuul00000000000000--- features: - Version 5.5.0 of Cloudera plugin is deprecated. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/deprecate-hdp-a9ff0ecf6006da49.yaml0000664000175000017500000000023600000000000024652 0ustar00zuulzuul00000000000000--- deprecations: - The HDP 2.0.6 plugin is deprecated in Mitaka release and will be removed in Newton release. Please, use the Ambari 2.3 instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/deprecate-json-formatted-policy-file-b267f288cba7e325.yaml0000664000175000017500000000175500000000000031117 0ustar00zuulzuul00000000000000--- upgrade: - | The default value of ``[oslo_policy] policy_file`` config option has been changed from ``policy.json`` to ``policy.yaml``. Operators who are utilizing customized or previously generated static policy JSON files (which are not needed by default), should generate new policy files or convert them in YAML format. Use the `oslopolicy-convert-json-to-yaml `_ tool to convert a JSON to YAML formatted policy file in backward compatible way. deprecations: - | Use of JSON policy files was deprecated by the ``oslo.policy`` library during the Victoria development cycle. As a result, this deprecation is being noted in the Xena cycle with an anticipated future removal of support by ``oslo.policy``. As such operators will need to convert to YAML policy files. Please see the upgrade notes for details on migration of any custom policy files. 
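A hedged example of converting a legacy JSON policy file as recommended in the deprecation note above; the tool name comes from the note, while the flags and paths shown are assumptions based on typical oslo.policy usage::

    $ oslopolicy-convert-json-to-yaml --namespace sahara \
          --policy-file /etc/sahara/policy.json \
          --output-file /etc/sahara/policy.yaml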
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/deprecate-mapr-51-090423438e3dda20.yaml0000664000175000017500000000022500000000000024741 0ustar00zuulzuul00000000000000--- deprecations: - MapR 5.1.0.mrv2 is now deprecated and will be removed in Ocata release. It is recommended to use MapR 5.2.0.mrv2 instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/deprecate-plugin-vanilla260-46e4b8fe96e8fe68.yaml0000664000175000017500000000007700000000000027227 0ustar00zuulzuul00000000000000--- deprecations: - Removed support of Vanilla 2.6.0 plugin. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/deprecate-sahara-all-entry-point-1446a00dab643b7b.yaml0000664000175000017500000000021400000000000030203 0ustar00zuulzuul00000000000000--- deprecations: - The `sahara-all` entry point is now deprecated. Please use the sahara-api and sahara-engine entry points instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/deprecate-spark-version-131-98eccc79b13b6b8f.yaml0000664000175000017500000000007100000000000027214 0ustar00zuulzuul00000000000000--- deprecations: - Spark version 1.3.1 is deprecated. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/deprecate-storm-version-092.yaml-b9ff2b9ebbb983fc.yaml0000664000175000017500000000007100000000000030346 0ustar00zuulzuul00000000000000--- deprecations: - Storm version 0.9.2 is deprecated. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/designate-integration-784c5f7f29546015.yaml0000664000175000017500000000014100000000000026060 0ustar00zuulzuul00000000000000--- features: - Added integration of Designate for hostname resolution through dns servers ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/drop-py-2-7-bc282e43b26fbf17.yaml0000664000175000017500000000031200000000000023750 0ustar00zuulzuul00000000000000--- upgrade: - | Python 2.7 support has been dropped. Last release of sahara to support python 2.7 is OpenStack Train. The minimum version of Python now supported by sahara is Python 3.6. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/enable-mutable-configuration-2dd6b7a0e0fe4437.yaml0000664000175000017500000000031300000000000027575 0ustar00zuulzuul00000000000000--- features: - | Operators can now update the running configuration of Sahara processes by sending the parent process a "HUP" signal. Note: The configuration option must support mutation. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/engine-opt-258ff1ae9b04d628.yaml0000664000175000017500000000012500000000000024052 0ustar00zuulzuul00000000000000--- deprecations: - Option 'infrastructure engine' is removed from sahara configs. 
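A small sketch of the configuration reload described in the mutable-configuration note above; the HUP signal is taken from the note, while the process lookup is an assumption that depends on how the Sahara services are deployed::

    # ask a running sahara-api process to re-read its mutable options
    $ kill -HUP $(pgrep -f sahara-api)

Only options marked as mutable are re-read; other changes still require a service restart.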
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/enhance-bfv-12bac06c4438675f.yaml0000664000175000017500000000036600000000000024100 0ustar00zuulzuul00000000000000--- features: - In Sahara APIv2, the type, availability zone, and locality of boot volumes may be expressed explicitly through the `boot_volume_type`, `boot_volume_availability_zone`, and `boot_volume_local_to_instance` parameters. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/event_log_for_hdp-a114511c477ef16d.yaml0000664000175000017500000000006100000000000025373 0ustar00zuulzuul00000000000000--- features: - Added event log for HDP plugin ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/fix-install-provision-events-c1bd2e05bf2be6bd.yaml0000664000175000017500000000011500000000000030044 0ustar00zuulzuul00000000000000--- fixes: - Fix uncompleted event logs for Oozie and Drill in MapR plugin.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/fixing-policy-inconsistencies-984020000cc3882a.yaml0000664000175000017500000000106600000000000027527 0ustar00zuulzuul00000000000000--- fixes: - | With APIv2 we detected some inconsistencies on the policies. This patch updates the policy to fix those incosistencies. other: - | All APIv2 policy names have been changed to the recommended format: specifically, changes to resource names (now _singular_, whereas previously they may have been _plural_, or otherwise inconsistent), action verbs (now fully independent of HTTP semantics) and overall formatting (hyphens replace underscores). Eventually, the remaining non-conforming policy names will be deprecated too. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/force-delete-apiv2-e372392bbc8639f8.yaml0000664000175000017500000000025200000000000025314 0ustar00zuulzuul00000000000000--- features: - The ability to force delete clusters is exposed in Sahara APIv2. The Heat service must support Stack Abandon for force delete to function properly. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/force-delete-changes-2e0881a99742c339.yaml0000664000175000017500000000037000000000000025540 0ustar00zuulzuul00000000000000--- features: - The behavior of force deletion of clusters (APIv2) has changed. Stack-abandon is no longer used. The response from the force-delete API call now includes the name of the stack which had underlain that deleted cluster. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/hadoop-swift-domain-fix-c1dfdf6c52b5aa25.yaml0000664000175000017500000000026500000000000026652 0ustar00zuulzuul00000000000000--- fixes: - Hadoop is now better configured to use the proper Keystone domain for interaction with Swift; previously the 'default' domain may have been incorrectly used. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/hadoop-swift-jar-for-ambari-4439913b01d42468.yaml0000664000175000017500000000012700000000000026675 0ustar00zuulzuul00000000000000--- fixes: - This patch adds ability to work with swift by using Keystone API v3 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/hdfs-dfs-94a9c4f64cf8994f.yaml0000664000175000017500000000020600000000000023531 0ustar00zuulzuul00000000000000--- fixes: - | The command hdfs fs has been deprecated in favor of hdfs fs. This fixes will allow the use of Hbase service. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/hdp-removed-from-defaults-31d1e1f15973b682.yaml0000664000175000017500000000025600000000000026625 0ustar00zuulzuul00000000000000--- upgrade: - HDP plugin removed from default configuration list. End users who are using HDP should ensure that their configuration files continue to list "hdp". ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/hdp25-b35ef99c240fc127.yaml0000664000175000017500000000010700000000000022725 0ustar00zuulzuul00000000000000--- features: - Implemented support of HDP 2.5 in the Ambari plugin. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/hdp26-5a406d7066706bf1.yaml0000664000175000017500000000010700000000000022560 0ustar00zuulzuul00000000000000--- features: - Implemented support of HDP 2.6 in the Ambari plugin. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/healthcheck-02e429a3ffcd9482.yaml0000664000175000017500000000044500000000000024254 0ustar00zuulzuul00000000000000--- features: - | Now healthcheck middleware is enabled by default. Applications like loadbalancer or monitoring tools can use ``/healthcheck`` path to monitor health of each API endpoints. Remove entries for healthcheck from ``api-paste.ini`` to disable this functionality. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/honor-endpoint-type-neutron-4583128c383d9745.yaml0000664000175000017500000000015600000000000027133 0ustar00zuulzuul00000000000000--- fixes: - Fixed issue with handling endpoint_type during creation neutron client, closed bug 1564805 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/ironic-support-79e7ecad05f54029.yaml0000664000175000017500000000020600000000000025005 0ustar00zuulzuul00000000000000--- other: - We are assuring that ironic support was tested after latest updates to nova and sahara and it is fully functional. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/kerberos-76dd297462b7337c.yaml0000664000175000017500000000026000000000000023470 0ustar00zuulzuul00000000000000--- features: - Kerberos support implemented for Cloudera and Ambari plugins. New oozie client implemented to support authentication for oozie in kerberized cluster. 
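An illustrative probe of the healthcheck middleware enabled by default per the note above; the ``/healthcheck`` path comes from the note, while the host and port (8386 being the usual Sahara API port) are assumptions::

    $ curl -i http://controller:8386/healthcheck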
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/key_manager_integration-e32d141809c8cc46.yaml0000664000175000017500000000024600000000000026611 0ustar00zuulzuul00000000000000--- features: - OpenStack Key Manager service can now be used by sahara to enable storage of sensitive information in an external service such as barbican. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/keypair-replacement-0c0cc3db0551c112.yaml0000664000175000017500000000016600000000000025712 0ustar00zuulzuul00000000000000--- features: - | Use a new keypair to access to the running cluster when the cluster's keypair is deleted. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/keystoneclient-to-keystonauth-migration-c75988975ad1a506.yaml0000664000175000017500000000016000000000000031701 0ustar00zuulzuul00000000000000--- upgrade: - migration from keystoneclient to keystoneauth is done for using auth features of keystone. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/mapr-health-check-2eba3d742a2b853f.yaml0000664000175000017500000000007500000000000025332 0ustar00zuulzuul00000000000000--- features: - Custom health check is added to MapR plugin././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/mapr-labels-5cc318616db59403.yaml0000664000175000017500000000036600000000000024045 0ustar00zuulzuul00000000000000--- features: - MapR 5.1.0.mrv2 is now Enabled. deprecations: - MapR 5.0.0.mrv2 is now Deprecated. It is not recommended for usage. It is better to use MapR 5.1.0.mrv2 instead. This version of plugin will be removed in Ocata release. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/mapr-remove-spark-standalone-293ca864de9a7848.yaml0000664000175000017500000000010400000000000027435 0ustar00zuulzuul00000000000000--- features: - Remove support for Spark standalone in MapR plugin././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/mapr-services-new-versions-b32c2e8fe07d1600.yaml0000664000175000017500000000025300000000000027205 0ustar00zuulzuul00000000000000--- features: - The following service versions were added to MapR 5.2.0 plugin - HBase 1.1 - Drill 1.6 - Mahout 0.11 0.12 - Spark 1.6.1 - Impala 2.5 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/mapr-services-new-versions-dc7652e33f26bbdc.yaml0000664000175000017500000000022600000000000027356 0ustar00zuulzuul00000000000000--- features: - The following service versions were added to MapR 5.2.0 plugin - Pig 0.16 - Spark 2.0.1 - Hue 3.10 - Drill 1.8, 1.9 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/mapr5.2.0-image-gen-c850e74977b00abe.yaml0000664000175000017500000000015400000000000025154 0ustar00zuulzuul00000000000000--- features: - Adding ability to create and validate MapR 5.2.0 images using the new image gen tool. 
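A hedged sketch of building a MapR 5.2.0 image with the image generation tooling referenced above; the ``sahara-image-pack`` command name appears elsewhere in these notes, but the exact arguments and the image file shown here are assumptions::

    $ sahara-image-pack --image ./base-image.qcow2 mapr 5.2.0.mrv2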
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/neutron-default-a6baf93d857d86b3.yaml0000664000175000017500000000022400000000000025207 0ustar00zuulzuul00000000000000--- upgrade: - Neutron is used by default now (use_neutron=True). Nova-network is not functionaly for most use cases starting from Ocata. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/nova-network-removal-debe306fd7c61268.yaml0000664000175000017500000000055700000000000026172 0ustar00zuulzuul00000000000000--- issues: - Ironic integration might be broken if floating IPs are used, due to the use of pre-created ports by the Sahara engine. The status of Ironic support was untested for this release. deprecations: - Support for nova-network is removed, reflective of its removal from nova itself and from python-novaclient. use_neutron=False is unsupported. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/novaclient_images_to_glanceclient-0266a2bd92b4be05.yaml0000664000175000017500000000010500000000000030663 0ustar00zuulzuul00000000000000--- upgrade: - Migration from novaclient.v2.images to glanceclient ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/ntp-config-51ed9d612132e2fa.yaml0000664000175000017500000000036000000000000024036 0ustar00zuulzuul00000000000000--- fixes: - | This fixes the issue with NTP configuration where a prefered server provided by the user is added to the end of the file and the defaults are not deleted. Here we add the prefered server to the top of the file. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/optional-project-id-apiv1-2e89756f6f16bd5e.yaml0000664000175000017500000000012100000000000026720 0ustar00zuulzuul00000000000000--- other: - The presence of project ID in Sahara APIv1 paths is now optional. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/options-to-oslo_messaging_notifications-cee206fc4f74c217.yaml0000664000175000017500000000011600000000000032133 0ustar00zuulzuul00000000000000--- upgrade: - Move notifications options into oslo_messaging_notifications ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/plugins-split-from-sahara-core-9ffc5e5d06c9239c.yaml0000664000175000017500000000024600000000000030040 0ustar00zuulzuul00000000000000--- features: - | In an effort to improve Sahara's usuability and manutenability we are splitting the plugins from Sahara core into their own repositories. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/policy_in_code-5847902775ff9861.yaml0000664000175000017500000000061200000000000024512 0ustar00zuulzuul00000000000000--- features: - This feature allows the policy enforcement to be done in code thus facilitating better maintenance of the policy file. In code the default policies are set and the operator only needs to change the policy file if they wish to override the rule or role for a specific policy or operation. 
Also, a complete policy file can be generated using genconfig tool. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/proxy-user-lowercase-f116f7b7e89274cb.yaml0000664000175000017500000000032000000000000026130 0ustar00zuulzuul00000000000000--- upgrade: - | The default proxy role for Swift is now member instead of Member. Keystone now creates the former by default, even if the latter is recognized to be the same (case preserving). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/rack_awareness_for_cdh-e0cd5d4ab46aa1b5.yaml0000664000175000017500000000010700000000000026654 0ustar00zuulzuul00000000000000--- features: - Added rack awareness feature for CDH 5.5 and CDH 5.7 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/rack_awareness_for_hdp-6e3d44468cc141a5.yaml0000664000175000017500000000007600000000000026414 0ustar00zuulzuul00000000000000--- features: - Added rack awareness feature for HDP plugin ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/refactor-floating-ips-logic-9d37d9297f3621b3.yaml0000664000175000017500000000026700000000000027156 0ustar00zuulzuul00000000000000--- features: - Refactoring the logic on how floating ips are used by Sahara. This change will allow the coexistence of cluster using floating ips with cluster that do not. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/remove-cdh_5.0_5.3_5.4-b5f140e9b0233c07.yaml0000664000175000017500000000012100000000000025266 0ustar00zuulzuul00000000000000--- features: - Versions 5.0.0 5.3.0 and 5.4.0 of Cloudera plugin are removed. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/remove-hard-coded-oozie-password-b97475c8772aa1bd.yaml0000664000175000017500000000015700000000000030263 0ustar00zuulzuul00000000000000--- fixes: - Fixed issues with hardcoded password during creation MySQL database for Oozie, bug 1541122. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/remove-hardcoded-password-from-hive-eb923b518974e853.yaml0000664000175000017500000000014400000000000030621 0ustar00zuulzuul00000000000000--- fixes: - Fixed issues with hardcoded password during starting hive process, bug 1498035. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/remove-hdp-137d0ad3d2389b7a.yaml0000664000175000017500000000013600000000000024043 0ustar00zuulzuul00000000000000--- deprecations: - Support of HDP 2.0.6 plugin was removed. Use Ambari plugin instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/remove-mapr-500-3df3041be99a864c.yaml0000664000175000017500000000010100000000000024532 0ustar00zuulzuul00000000000000--- deprecations: - Removed support for the MapR 5.0.0 plugin. 
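One possible way to generate the complete default policy file mentioned in the policy-in-code note above; the generator command and namespace are assumptions based on standard oslo.policy tooling rather than taken verbatim from the note::

    $ oslopolicy-sample-generator --namespace sahara \
          --output-file etc/sahara/policy.yaml.sample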
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/remove-spark-100-44f3d5efc3806410.yaml0000664000175000017500000000010200000000000024622 0ustar00zuulzuul00000000000000--- deprecations: - Removed support for the Spark 1.0.0 plugin. ././@PaxHeader0000000000000000000000000000021100000000000011447 xustar0000000000000000115 path=sahara-16.0.0/releasenotes/notes/remove-upload-oozie-sharelib-step-in-vanilla-2.8.2-546b2026e2f5d557.yaml 22 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/remove-upload-oozie-sharelib-step-in-vanilla-2.8.2-546b2026e2f5d5570000664000175000017500000000033500000000000032043 0ustar00zuulzuul00000000000000--- issues: - | Remove the step "upload httpclient to oozie/sharelib" in sahara code. User should use latest vanilla-2.8.2 image which is built on SIE "Change-ID: I3a25ee8c282849911089adf6c3593b1bb50fd067". ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/remove-use-neutron-2499b661dce041d4.yaml0000664000175000017500000000025500000000000025503 0ustar00zuulzuul00000000000000--- upgrade: - | Nova network has been fully removed from the OpenStack codebase, remove all instances of switches on use_neutron and the configuration value. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/remove_custom_auth_domainname-984fd2d931e306cc.yaml0000664000175000017500000000035500000000000030113 0ustar00zuulzuul00000000000000--- deprecations: - The custom admin_user_domain_name and admin_project_domain_name configuration options have been removed; they are provided by keystone_authtoken as user_domain_name and project_domain_name respectively. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/remove_enable_notifications_opt-4c0d46e8e79eb06f.yaml0000664000175000017500000000030700000000000030512 0ustar00zuulzuul00000000000000--- deprecations: - The 'enable' option of the 'oslo_messaging_notifications' section has been removed. To enable notifications now please specify the 'driver' option in the same section. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/s3-datasource-protocol-d3abd0b22f653b3b.yaml0000664000175000017500000000013600000000000026435 0ustar00zuulzuul00000000000000--- other: - | The URL of an S3 data source may have `s3://` or `s3a://`, equivalently. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/sahara-cfg-location-change-7b61454311b16ce8.yaml0000664000175000017500000000021600000000000026661 0ustar00zuulzuul00000000000000--- upgrade: - | Sample configuration files previously installed in share/sahara will now be installed into etc/sahara instead. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/sahara-endpoint-version-discovery-826e9f31093cb10f.yaml0000664000175000017500000000064700000000000030476 0ustar00zuulzuul00000000000000--- prelude: > - Sahara APIv2 is reaching a point of maturity. 
Therefore, new deployments should include an **unversioned** endpoint in the service catalog for the "data-processing" service, for the purposes of more intuitive version discovery. Eventually existing deployments should switch to an unversioned endpoint, too, but only after time is given for the use of older clients to be less likely. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/some-polish-api-v2-2d2e390a74b088f9.yaml0000664000175000017500000000134400000000000025265 0ustar00zuulzuul00000000000000--- other: - Some polishings to APIv2 have been made in an effort to bring it from experimental (and therefore, evolving and unpredictable) to stable. More instances of `tenant_id` have been changed to `project_id`, in the cluster and job template APIs. `job_id` was changed to `job_template_id` in the job API. The newly-minted query string validation feature has been fixed to allow `show_progress` as a parameter on cluster GET; on a similar note some APIv2 endpoints which previously could be filtered by `hadoop_version` are now filtered by `plugin_version` instead. Also, the schema for cluster PATCH in APIv1.1 now no longer includes the key `update_keypair`; its prior inclusion was a mistake. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/spark-2.2-d7c3a84bd52f735a.yaml0000664000175000017500000000006600000000000023504 0ustar00zuulzuul00000000000000--- features: - Adding Spark version 2.2 to Sahara. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/spark-2.3-0277fe9feae6668a.yaml0000664000175000017500000000007500000000000023524 0ustar00zuulzuul00000000000000--- upgrade: - Adding Spark 2.3 to supported plugins list. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/storm-1.2-af75fedb413de56a.yaml0000664000175000017500000000015400000000000023665 0ustar00zuulzuul00000000000000--- upgrade: - Adding new versions of Storm, 1.2.0 and 1.2.1. Both will exist under the same tag 1.2. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/strict-validation-query-string-a6cadbf2f9c57d06.yaml0000664000175000017500000000035600000000000030336 0ustar00zuulzuul00000000000000--- other: - In APIv2 there is now strict checking of parameters in the query string. This means that unexpected values in the query string will give a 400 error (as opposed to previously being ignored, or causing a 500 error). ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/substring-matching-1d5981b8e5b1d919.yaml0000664000175000017500000000041000000000000025537 0ustar00zuulzuul00000000000000--- fixes: - Add regular expression matching on search values for certain string fields of sahara objects. This applies to list operations through the REST API and therefore applies to the dashboard and sahara client as well. Closes bug 1503345. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/support-s3-data-source-a912e2cdf4cd51fb.yaml0000664000175000017500000000013400000000000026456 0ustar00zuulzuul00000000000000--- features: - An EDP data source may reference a file stored in a S3-like object store. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/support-s3-job-binary-6d91267ae11d09d3.yaml0000664000175000017500000000013300000000000026007 0ustar00zuulzuul00000000000000--- features: - An EDP job binary may reference a file stored in a S3-like object store. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/transport_url-5bbbf0bb54d81727.yaml0000664000175000017500000000030200000000000024766 0ustar00zuulzuul00000000000000--- features: - Separate transport url can be used for notifications purposes now, to enable this feature 'transport_url' should be provided in 'oslo_messaging_notifications' section. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/trustee-conf-section-5994dcd48a9744d7.yaml0000664000175000017500000000053000000000000026026 0ustar00zuulzuul00000000000000--- deprecations: - | The use of [keystone_authtoken] credentials for trust creation is now deprecated. Please use the new [trustee] config section. The options ``username``, ``password``, ``project_name``, ``user_domain_name``, ``project_domain_name``, and ``auth_url`` (with version) are obligatory within that section. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/updating-plugins-versions-b8d27764178c3cdd.yaml0000664000175000017500000000102200000000000027146 0ustar00zuulzuul00000000000000--- prelude: > Every new release of Sahara we update our plugins list. Some new versions are added and some removed and other marked as deprecated. For Rocky we are deprecating CDH 5.7.0, Spark 1.6.0 and 2.1 as well as Storm 1.0.1. We are also removing CDH 5.5.0, MapR 5.1.0, Spark 1.3.1 and Storm 0.9.2. deprecations: - We are deprecating CDH 5.7.0, Spark 1.6.0 and 2.1 and Storm 1.0.1. upgrade: - We are removing some plugins versions. Those are CDH 5.5.0, MapR 5.1.0, Spark 1.3.1 and Storm 0.9.2. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/vanilla-2.7.5-support-ffeeb88fc4be34b4.yaml0000664000175000017500000000011100000000000026113 0ustar00zuulzuul00000000000000--- features: - | Support deploy hadoop 2.7.5 with vanilla plugin. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/vanilla-2.8.2-support-84c89aad31105584.yaml0000664000175000017500000000011100000000000025441 0ustar00zuulzuul00000000000000--- features: - | Support deploy hadoop 2.8.2 with vanilla plugin. 
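A minimal configuration sketch for the separate notification transport described in the ``transport_url`` note above; the section and option names come from the notes, while the broker URL and driver value are placeholders::

    [oslo_messaging_notifications]
    driver = messagingv2
    transport_url = rabbit://sahara:secret@rabbit-host:5672/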
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/notes/zookeeper-configuration-steps-48c3d9706c86f227.yaml0000664000175000017500000000036200000000000027665 0ustar00zuulzuul00000000000000--- prelude: > Documentation about distributed periodics are extended with steps about installation additional libs required for correct work of coordination backend. Please refer Advanced Configuration Guide for details. ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.681891 sahara-16.0.0/releasenotes/source/0000775000175000017500000000000000000000000017051 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.681891 sahara-16.0.0/releasenotes/source/_static/0000775000175000017500000000000000000000000020477 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/_static/.placeholder0000664000175000017500000000000000000000000022750 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.681891 sahara-16.0.0/releasenotes/source/_templates/0000775000175000017500000000000000000000000021206 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/_templates/.placeholder0000664000175000017500000000000000000000000023457 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/conf.py0000664000175000017500000001472500000000000020361 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Sahara Release Notes documentation build configuration file extensions = [ 'reno.sphinxext', 'openstackdocstheme' ] # openstackdocstheme options repository_name = 'openstack/sahara' use_storyboard = True # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. copyright = '2015, Sahara Developers' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. 
For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'SaharaReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'SaharaReleaseNotes.tex', 'Sahara Release Notes Documentation', 'Sahara Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. 
# latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'saharareleasenotes', 'Sahara Release Notes Documentation', ['Sahara Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'SaharaReleaseNotes', 'Sahara Release Notes Documentation', 'Sahara Developers', 'SaharaReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/index.rst0000664000175000017500000000035700000000000020717 0ustar00zuulzuul00000000000000====================== Sahara Release Notes ====================== .. toctree:: :maxdepth: 1 unreleased xena wallaby victoria ussuri train stein rocky queens pike ocata newton mitaka liberty ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/liberty.rst0000664000175000017500000000022200000000000021251 0ustar00zuulzuul00000000000000============================== Liberty Series Release Notes ============================== .. release-notes:: :branch: origin/stable/liberty ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.593891 sahara-16.0.0/releasenotes/source/locale/0000775000175000017500000000000000000000000020310 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.593891 sahara-16.0.0/releasenotes/source/locale/en_GB/0000775000175000017500000000000000000000000021262 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.681891 sahara-16.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000023047 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po0000664000175000017500000011331000000000000026077 0ustar00zuulzuul00000000000000# Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata # Andi Chandler , 2020. 
#zanata msgid "" msgstr "" "Project-Id-Version: sahara\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2020-04-30 22:35+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2020-05-02 09:29+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "10.0.0" msgstr "10.0.0" msgid "12.0.0.0rc1" msgstr "12.0.0.0rc1" msgid "3.0.1" msgstr "3.0.1" msgid "4.0.0" msgstr "4.0.0" msgid "4.0.1" msgstr "4.0.1" msgid "4.1.0" msgstr "4.1.0" msgid "5.0.0" msgstr "5.0.0" msgid "6.0.0" msgstr "6.0.0" msgid "7.0.0" msgstr "7.0.0" msgid "7.0.3" msgstr "7.0.3" msgid "8.0.0" msgstr "8.0.0" msgid "8.0.1" msgstr "8.0.1" msgid "8.0.2" msgstr "8.0.2" msgid "8.0.3" msgstr "8.0.3" msgid "9.0.0" msgstr "9.0.0" msgid "9.0.1" msgstr "9.0.1" msgid "9.0.2" msgstr "9.0.2" msgid "" "A few responses in the experimental (but nearly-stable) APIv2 have been " "tweaked. To be specific, the key `hadoop_version` has been replaced with " "`plugin_version`, the key `job` has been replaced with `job_template`, the " "key `job_execution` has been replaced with `job`, and the key `oozie_job_id` " "has been replaced with `engine_job_id`. In fact, these changes were all " "previously partially implemented, and are now completely implemented." msgstr "" "A few responses in the experimental (but nearly-stable) APIv2 have been " "tweaked. To be specific, the key `hadoop_version` has been replaced with " "`plugin_version`, the key `job` has been replaced with `job_template`, the " "key `job_execution` has been replaced with `job`, and the key `oozie_job_id` " "has been replaced with `engine_job_id`. In fact, these changes were all " "previously partially implemented, and are now completely implemented." msgid "Add Kafka to MapR plugin" msgstr "Add Kafka to MapR plugin" msgid "Add Sentry to MapR plugin" msgstr "Add Sentry to MapR plugin" msgid "Add ability of scheduling EDP jobs for sahara" msgstr "Add ability of scheduling EDP jobs for sahara" msgid "" "Add ability to automaticaly generate better configurations for Ambari " "cluster by using 'ALWAYS_APPLY' strategy" msgstr "" "Add ability to automatically generate better configurations for Ambari " "cluster by using 'ALWAYS_APPLY' strategy" msgid "Add impala 2.2 to MapR plugin" msgstr "Add impala 2.2 to MapR plugin" msgid "" "Add regular expression matching on search values for certain string fields " "of sahara objects. This applies to list operations through the REST API and " "therefore applies to the dashboard and sahara client as well. Closes bug " "1503345." msgstr "" "Add regular expression matching on search values for certain string fields " "of Sahara objects. This applies to list operations through the REST API and " "therefore applies to the dashboard and Sahara client as well. Closes bug " "1503345." msgid "Added additional filter to volume_type check, closed bug 1508017" msgstr "Added additional filter to volume_type check, closed bug 1508017" msgid "Added event log for HDP plugin" msgstr "Added event log for HDP plugin" msgid "" "Added integration of Designate for hostname resolution through dns servers" msgstr "" "Added integration of Designate for hostname resolution through DNS servers" msgid "Added new tool ``sahara-status upgrade check``." msgstr "Added new tool ``sahara-status upgrade check``." 
msgid "Added rack awareness feature for CDH 5.5 and CDH 5.7" msgstr "Added rack awareness feature for CDH 5.5 and CDH 5.7" msgid "Added rack awareness feature for HDP plugin" msgstr "Added rack awareness feature for HDP plugin" msgid "" "Added support of running sahara-api as wsgi application. Use 'sahara-wsgi-" "api' command for use this feature." msgstr "" "Added support of running sahara-api as wsgi application. Use 'sahara-wsgi-" "api' command for use this feature." msgid "Adding Spark 2.3 to supported plugins list." msgstr "Adding Spark 2.3 to supported plugins list." msgid "Adding Spark version 2.2 to Sahara." msgstr "Adding Spark version 2.2 to Sahara." msgid "" "Adding ability to create and validate MapR 5.2.0 images using the new image " "gen tool." msgstr "" "Adding ability to create and validate MapR 5.2.0 images using the new image " "gen tool." msgid "" "Adding new versions of Storm, 1.2.0 and 1.2.1. Both will exist under the " "same tag 1.2." msgstr "" "Adding new versions of Storm, 1.2.0 and 1.2.1. Both will exist under the " "same tag 1.2." msgid "Adding support to CDH 5.13.0 in CDH plugin." msgstr "Adding support to CDH 5.13.0 in CDH plugin." msgid "" "Adding the ability to boot a Sahara cluster from volumes instead of images." msgstr "" "Adding the ability to boot a Sahara cluster from volumes instead of images." msgid "" "Adding the ability to change default timeout parameter for ambari agent " "package installation" msgstr "" "Adding the ability to change default timeout parameter for Ambari agent " "package installation" msgid "Adding the ability to create Ambari 2.6 images on sahara-image-pack" msgstr "Adding the ability to create Ambari 2.6 images on sahara-image-pack" msgid "" "After decommissioning hosts all assoicated configs groups will be removed in " "ambari plugin." msgstr "" "After decommissioning hosts all associated configs groups will be removed in " "Ambari plugin." msgid "" "All APIv2 policy names have been changed to the recommended format: " "specifically, changes to resource names (now _singular_, whereas previously " "they may have been _plural_, or otherwise inconsistent), action verbs (now " "fully independent of HTTP semantics) and overall formatting (hyphens replace " "underscores). Eventually, the remaining non-conforming policy names will be " "deprecated too." msgstr "" "All APIv2 policy names have been changed to the recommended format: " "specifically, changes to resource names (now _singular_, whereas previously " "they may have been _plural_, or otherwise inconsistent), action verbs (now " "fully independent of HTTP semantics) and overall formatting (hyphens replace " "underscores). Eventually, the remaining non-conforming policy names will be " "deprecated too." msgid "" "An EDP data source may reference a file stored in a S3-like object store." msgstr "" "An EDP data source may reference a file stored in a S3-like object store." msgid "" "An EDP job binary may reference a file stored in a S3-like object store." msgstr "" "An EDP job binary may reference a file stored in a S3-like object store." msgid "" "As part of the APIv2 work we changed all tenant_id references to project_id " "on the return payload of REST calls." msgstr "" "As part of the APIv2 work we changed all tenant_id references to project_id " "on the return payload of REST calls." 
msgid "Bug Fixes" msgstr "Bug Fixes" msgid "" "CA certificate handling in keystone, nova, neutron and cinder clients are " "fixed (#330635)" msgstr "" "CA certificate handling in Keystone, Nova, Neutron and Cinder clients are " "fixed (#330635)" msgid "CDH 5.11.0 is supported in CDH plugin." msgstr "CDH 5.11.0 is supported in CDH plugin." msgid "CDH 5.5.0 is supported in CDH plugin." msgstr "CDH 5.5.0 is supported in CDH plugin." msgid "CDH 5.7 is supported in CDH plugin." msgstr "CDH 5.7 is supported in CDH plugin." msgid "CDH 5.9.0 is supported in CDH plugin." msgstr "CDH 5.9.0 is supported in CDH plugin." msgid "" "Cluster with 'is_protected' field can be created now, closed bug 1510929" msgstr "" "Cluster with 'is_protected' field can be created now, closed bug 1510929" msgid "" "Convert to cluster template feature is no longer supported by all plugins." msgstr "" "Convert to cluster template feature is no longer supported by all plugins." msgid "Correctly configure Spark with Hive and HBase on different nodes." msgstr "Correctly configure Spark with Hive and HBase on different nodes." msgid "Current Series Release Notes" msgstr "Current Series Release Notes" msgid "Custom health check is added to MapR plugin" msgstr "Custom health check is added to MapR plugin" msgid "Deprecation Notes" msgstr "Deprecation Notes" msgid "" "Documentation about distributed periodics are extended with steps about " "installation additional libs required for correct work of coordination " "backend. Please refer Advanced Configuration Guide for details." msgstr "" "Documentation about distributed periodics are extended with steps about " "installation additional libs required for correct work of coordination " "backend. Please refer Advanced Configuration Guide for details." msgid "" "Enables the creation and validation of Ambari 2.4 images using the new image " "generation process where libguestfs replaces the use of DIB." msgstr "" "Enables the creation and validation of Ambari 2.4 images using the new image " "generation process where libguestfs replaces the use of DIB." msgid "" "Enables the creation and validation of CDH 5.11.0 images using the new image " "generation process where libguestfs replaces the use of DIB." msgstr "" "Enables the creation and validation of CDH 5.11.0 images using the new image " "generation process where libguestfs replaces the use of DIB." msgid "" "Enables the creation and validation of CDH 5.7.0 images using the new image " "generation process where libguestfs replaces the use of DIB." msgstr "" "Enables the creation and validation of CDH 5.7.0 images using the new image " "generation process where libguestfs replaces the use of DIB." msgid "" "Enables the creation and validation of CDH 5.9.0 images using the new image " "generation process where libguestfs replaces the use of DIB." msgstr "" "Enables the creation and validation of CDH 5.9.0 images using the new image " "generation process where libguestfs replaces the use of DIB." msgid "Escape unhandled unicode characters in environment configuration files" msgstr "Escape unhandled Unicode characters in environment configuration files" msgid "" "Event log hanling fixed in case of problems with volumes formatting, closed " "bug 1506839" msgstr "" "Event log handling fixed in case of problems with volumes formatting, closed " "bug 1506839" msgid "" "Every new release of Sahara we update our plugins list. Some new versions " "are added and some removed and other marked as deprecated. 
For Rocky we are " "deprecating CDH 5.7.0, Spark 1.6.0 and 2.1 as well as Storm 1.0.1. We are " "also removing CDH 5.5.0, MapR 5.1.0, Spark 1.3.1 and Storm 0.9.2." msgstr "" "Every new release of Sahara we update our plugins list. Some new versions " "are added and some removed and other marked as deprecated. For Rocky we are " "deprecating CDH 5.7.0, Spark 1.6.0 and 2.1 as well as Storm 1.0.1. We are " "also removing CDH 5.5.0, MapR 5.1.0, Spark 1.3.1 and Storm 0.9.2." msgid "Fix Hue intergation with Spark and Hive on different nodes" msgstr "Fix Hue integration with Spark and Hive on different nodes" msgid "Fix creating Hive database schema automatically." msgstr "Fix creating Hive database schema automatically." msgid "Fix unavailable MCS link." msgstr "Fix unavailable MCS link." msgid "Fix uncompleted event logs for Oozie and Drill in MapR plugin." msgstr "Fix uncompleted event logs for Oozie and Drill in MapR plugin." msgid "Fix visualization of MapR versions when running on python 3" msgstr "Fix visualisation of MapR versions when running on Python 3" msgid "Fixed api_insecure handling in sessions. Closed bug 1539498." msgstr "Fixed api_insecure handling in sessions. Closed bug 1539498." msgid "Fixed arguments list in latest cdh plugin" msgstr "Fixed arguments list in latest CDH plugin" msgid "Fixed client tempest tests" msgstr "Fixed client tempest tests" msgid "Fixed incorrect down scaling of ambari cluster" msgstr "Fixed incorrect down scaling of Ambari cluster" msgid "" "Fixed issue with handling endpoint_type during creation neutron client, " "closed bug 1564805" msgstr "" "Fixed issue with handling endpoint_type during creation neutron client, " "closed bug 1564805" msgid "Fixed issue with incorrect retrieving of auth url." msgstr "Fixed issue with incorrect retrieving of auth URL." msgid "Fixed issue with launching clusters without auto-security groups." msgstr "Fixed issue with launching clusters without auto-security groups." msgid "" "Fixed issues with hardcoded password during creation MySQL database for " "Oozie, bug 1541122." msgstr "" "Fixed issues with hardcoded password during creation MySQL database for " "Oozie, bug 1541122." msgid "" "Fixed issues with hardcoded password during starting hive process, bug " "1498035." msgstr "" "Fixed issues with hardcoded password during starting Hive process, bug " "1498035." msgid "Fixed issues with using Swift as an output datasource." msgstr "Fixed issues with using Swift as an output data source." msgid "Fixed launching Hive jobs in Ambari plugin." msgstr "Fixed launching Hive jobs in Ambari plugin." msgid "" "Fixed problems with validation of job binaries update, closed bug 1508507" msgstr "" "Fixed problems with validation of job binaries update, closed bug 1508507" msgid "Fixed search of devices that need to be mounted, closed bug 1508872" msgstr "Fixed search of devices that need to be mounted, closed bug 1508872" msgid "" "Fixed several bugs which prevented sahara-image-pack from generating Ambari-" "based Ubuntu images." msgstr "" "Fixed several bugs which prevented sahara-image-pack from generating Ambari-" "based Ubuntu images." msgid "Fixed tempest config group" msgstr "Fixed Tempest config group" msgid "" "Fixes `story 2002551`_ in which CDH cluster creation was stuck when the " "image was generated using sahara-image-elements, and at the same time extjs " "2.2 was not injected inside the images generated using sahara-image-pack." 
msgstr "" "Fixes `story 2002551`_ in which CDH cluster creation was stuck when the " "image was generated using sahara-image-elements, and at the same time extjs " "2.2 was not injected inside the images generated using sahara-image-pack." msgid "" "HDP plugin removed from default configuration list. End users who are using " "HDP should ensure that their configuration files continue to list \"hdp\"." msgstr "" "HDP plugin removed from default configuration list. End users who are using " "HDP should ensure that their configuration files continue to list \"hdp\"." msgid "" "Hadoop is now better configured to use the proper Keystone domain for " "interaction with Swift; previously the 'default' domain may have been " "incorrectly used." msgstr "" "Hadoop is now better configured to use the proper Keystone domain for " "interaction with Swift; previously the 'default' domain may have been " "incorrectly used." msgid "Heat engine shows reasons in case of failure" msgstr "Heat engine shows reasons in case of failure" msgid "Implemented support of HDP 2.5 in the Ambari plugin." msgstr "Implemented support of HDP 2.5 in the Ambari plugin." msgid "Implemented support of HDP 2.6 in the Ambari plugin." msgstr "Implemented support of HDP 2.6 in the Ambari plugin." msgid "" "In APIv2 there is now strict checking of parameters in the query string. " "This means that unexpected values in the query string will give a 400 error " "(as opposed to previously being ignored, or causing a 500 error)." msgstr "" "In APIv2 there is now strict checking of parameters in the query string. " "This means that unexpected values in the query string will give a 400 error " "(as opposed to previously being ignored, or causing a 500 error)." msgid "" "In Sahara APIv2, the type, availability zone, and locality of boot volumes " "may be expressed explicitly through the `boot_volume_type`, " "`boot_volume_availability_zone`, and `boot_volume_local_to_instance` " "parameters." msgstr "" "In Sahara APIv2, the type, availability zone, and locality of boot volumes " "may be expressed explicitly through the `boot_volume_type`, " "`boot_volume_availability_zone`, and `boot_volume_local_to_instance` " "parameters." msgid "" "In an effort to improve Sahara's usuability and manutenability we are " "splitting the plugins from Sahara core into their own repositories." msgstr "" "In an effort to improve Sahara's usability and maintainability we are " "splitting the plugins from Sahara core into their own repositories." msgid "In case of XFS volume will be formatted with '-f' option." msgstr "In case of XFS volume will be formatted with '-f' option." msgid "" "Ironic integration might be broken if floating IPs are used, due to the use " "of pre-created ports by the Sahara engine. The status of Ironic support was " "untested for this release." msgstr "" "Ironic integration might be broken if Floating IPs are used, due to the use " "of pre-created ports by the Sahara engine. The status of Ironic support was " "untested for this release." msgid "" "Issues with job types call are fixed. Jobs can be launched from UI if ambari " "plugin enabled." msgstr "" "Issues with job types call are fixed. Jobs can be launched from UI if Ambari " "plugin enabled." 
msgid "" "Job execution can be created with 'is_protected' field, closed bug 1510994" msgstr "" "Job execution can be created with 'is_protected' field, closed bug 1510994" msgid "Kafka was added in CDH 5.5 and CDH 5.7" msgstr "Kafka was added in CDH 5.5 and CDH 5.7" msgid "" "Kerberos support implemented for Cloudera and Ambari plugins. New oozie " "client implemented to support authentication for oozie in kerberized cluster." msgstr "" "Kerberos support implemented for Cloudera and Ambari plugins. New Oozie " "client implemented to support authentication for Oozie in Kerberised cluster." msgid "Known Issues" msgstr "Known Issues" msgid "Liberty Series Release Notes" msgstr "Liberty Series Release Notes" msgid "MaR 5.2.0 is supported in MapR plugin." msgstr "MapR 5.2.0 is supported in MapR plugin." msgid "Make Oozie use correct maprfs jar in MapR plugin" msgstr "Make Oozie use correct maprfs jar in MapR plugin" msgid "" "MapR 5.0.0.mrv2 is now Deprecated. It is not recommended for usage. It is " "better to use MapR 5.1.0.mrv2 instead. This version of plugin will be " "removed in Ocata release." msgstr "" "MapR 5.0.0.mrv2 is now deprecated. It is not recommended for usage. It is " "better to use MapR 5.1.0.mrv2 instead. This version of plugin will be " "removed in the Ocata release." msgid "MapR 5.1.0.mrv2 is now Enabled." msgstr "MapR 5.1.0.mrv2 is now Enabled." msgid "" "MapR 5.1.0.mrv2 is now deprecated and will be removed in Ocata release. It " "is recommended to use MapR 5.2.0.mrv2 instead." msgstr "" "MapR 5.1.0.mrv2 is now deprecated and will be removed in the Ocata release. " "It is recommended to use MapR 5.2.0.mrv2 instead." msgid "" "MapR repositories now can be configured in general section of cluster " "template configs" msgstr "" "MapR repositories now can be configured in general section of cluster " "template configs" msgid "Migration from novaclient.v2.images to glanceclient" msgstr "Migration from novaclient.v2.images to glanceclient" msgid "Mitaka Series Release Notes" msgstr "Mitaka Series Release Notes" msgid "Move notifications options into oslo_messaging_notifications" msgstr "Move notifications options into oslo_messaging_notifications" msgid "" "Neutron is used by default now (use_neutron=True). Nova-network is not " "functionaly for most use cases starting from Ocata." msgstr "" "Neutron is used by default now (use_neutron=True). Nova-Network is not " "functional for most use cases starting from Ocata." msgid "New Features" msgstr "New Features" msgid "" "New framework for ``sahara-status upgrade check`` command is added. This " "framework allows adding various checks which can be run before a Sahara " "upgrade to ensure if the upgrade can be performed safely." msgstr "" "New framework for ``sahara-status upgrade check`` command is added. This " "framework allows adding various checks which can be run before a Sahara " "upgrade to ensure if the upgrade can be performed safely." msgid "Newton Series Release Notes" msgstr "Newton Series Release Notes" msgid "" "Nova network has been fully removed from the OpenStack codebase, remove all " "instances of switches on use_neutron and the configuration value." msgstr "" "Nova Network has been fully removed from the OpenStack codebase, remove all " "instances of switches on use_neutron and the configuration value." 
msgid "Ocata Series Release Notes" msgstr "Ocata Series Release Notes" msgid "Open ports for Kafka in Ambari plugin, closed bug 1512661" msgstr "Open ports for Kafka in Ambari plugin, closed bug 1512661" msgid "" "OpenStack Key Manager service can now be used by sahara to enable storage of " "sensitive information in an external service such as barbican." msgstr "" "OpenStack Key Manager service can now be used by Sahara to enable storage of " "sensitive information in an external service such as Barbican." msgid "" "Operator can now use new CLI tool ``sahara-status upgrade check`` to check " "if Sahara deployment can be safely upgraded from N-1 to N release." msgstr "" "Operator can now use new CLI tool ``sahara-status upgrade check`` to check " "if Sahara deployment can be safely upgraded from N-1 to N release." msgid "" "Operators can now update the running configuration of Sahara processes by " "sending the parent process a \"HUP\" signal. Note: The configuration option " "must support mutation." msgstr "" "Operators can now update the running configuration of Sahara processes by " "sending the parent process a \"HUP\" signal. Note: The configuration option " "must support mutation." msgid "Option 'infrastructure engine' is removed from sahara configs." msgstr "Option 'infrastructure engine' is removed from Sahara configs." msgid "Other Notes" msgstr "Other Notes" msgid "Pike Series Release Notes" msgstr "Pike Series Release Notes" msgid "Prelude" msgstr "Prelude" msgid "" "Python 2.7 support has been dropped. Last release of sahara to support " "python 2.7 is OpenStack Train. The minimum version of Python now supported " "by sahara is Python 3.6." msgstr "" "Python 2.7 support has been dropped. Last release of sahara to support " "python 2.7 is OpenStack Train. The minimum version of Python now supported " "by sahara is Python 3.6." msgid "Queens Series Release Notes" msgstr "Queens Series Release Notes" msgid "" "Refactoring the logic on how floating ips are used by Sahara. This change " "will allow the coexistence of cluster using floating ips with cluster that " "do not." msgstr "" "Refactoring the logic on how Floating IPs are used by Sahara. This change " "will allow the coexistence of cluster using Floating IPs with cluster that " "do not." msgid "" "Remove Impala 1.4.1, HBase 0.98.9, Hive 1.0, Hue 3.7 support in MapR 5.1.0 " "plugin" msgstr "" "Remove Impala 1.4.1, HBase 0.98.9, Hive 1.0, Hue 3.7 support in MapR 5.1.0 " "plugin" msgid "" "Remove hive.server2.authentication property from hive-site.xml for Hue 3.9.0 " "in MapR plugin." msgstr "" "Remove hive.server2.authentication property from hive-site.xml for Hue 3.9.0 " "in MapR plugin." msgid "Remove support for Spark standalone in MapR plugin" msgstr "Remove support for Spark standalone in MapR plugin" msgid "" "Remove the step \"upload httpclient to oozie/sharelib\" in sahara code. User " "should use latest vanilla-2.8.2 image which is built on SIE \"Change-ID: " "I3a25ee8c282849911089adf6c3593b1bb50fd067\"." msgstr "" "Remove the step \"upload httpclient to oozie/sharelib\" in sahara code. User " "should use latest vanilla-2.8.2 image which is built on SIE \"Change-ID: " "I3a25ee8c282849911089adf6c3593b1bb50fd067\"." msgid "Removed support for the MapR 5.0.0 plugin." msgstr "Removed support for the MapR 5.0.0 plugin." msgid "Removed support for the Spark 1.0.0 plugin." msgstr "Removed support for the Spark 1.0.0 plugin." msgid "Removed support of Vanilla 2.6.0 plugin." msgstr "Removed support of Vanilla 2.6.0 plugin." 
msgid "Removed unneeded volume serialization" msgstr "Removed unneeded volume serialisation" msgid "Rocky Series Release Notes" msgstr "Rocky Series Release Notes" msgid "" "Sahara APIv2 is reaching a point of maturity. Therefore, new deployments " "should include an **unversioned** endpoint in the service catalog for the " "\"data-processing\" service, for the purposes of more intuitive version " "discovery. Eventually existing deployments should switch to an unversioned " "endpoint, too, but only after time is given for the use of older clients to " "be less likely." msgstr "" "Sahara APIv2 is reaching a point of maturity. Therefore, new deployments " "should include an **unversioned** endpoint in the service catalogue for the " "\"data-processing\" service, for the purposes of more intuitive version " "discovery. Eventually existing deployments should switch to an unversioned " "endpoint, too, but only after time is given for the use of older clients to " "be less likely." msgid "Sahara Release Notes" msgstr "Sahara Release Notes" msgid "Sahara's APIv2 is now considered stable, and no longer experimental." msgstr "Sahara's APIv2 is now considered stable, and no longer experimental." msgid "" "Sahara's APIv2 is now exposed by default (although its state is still " "experimental). It has feature parity with Sahara's APIv1.1, but APIv2 brings " "better REST semantics, tweaks to some response payloads, and some other " "improvements. APIv2 will remain labeled experimental until it is stabilized " "following the addition of new features to it in the coming cycle(s)." msgstr "" "Sahara's APIv2 is now exposed by default (although its state is still " "experimental). It has feature parity with Sahara's APIv1.1, but APIv2 brings " "better REST semantics, tweaks to some response payloads, and some other " "improvements. APIv2 will remain labelled experimental until it is stabilised " "following the addition of new features to it in the upcoming cycle(s)." msgid "" "Sample configuration files previously installed in share/sahara will now be " "installed into etc/sahara instead." msgstr "" "Sample configuration files previously installed in share/Sahara will now be " "installed into etc/Sahara instead." msgid "" "Separate transport url can be used for notifications purposes now, to enable " "this feature 'transport_url' should be provided in " "'oslo_messaging_notifications' section." msgstr "" "Separate transport URL can be used for notifications purposes now, to enable " "this feature 'transport_url' should be provided in " "'oslo_messaging_notifications' section." msgid "" "Some polishings to APIv2 have been made in an effort to bring it from " "experimental (and therefore, evolving and unpredictable) to stable. More " "instances of `tenant_id` have been changed to `project_id`, in the cluster " "and job template APIs. `job_id` was changed to `job_template_id` in the job " "API. The newly-minted query string validation feature has been fixed to " "allow `show_progress` as a parameter on cluster GET; on a similar note some " "APIv2 endpoints which previously could be filtered by `hadoop_version` are " "now filtered by `plugin_version` instead. Also, the schema for cluster PATCH " "in APIv1.1 now no longer includes the key `update_keypair`; its prior " "inclusion was a mistake." msgstr "" "Some polishings to APIv2 have been made in an effort to bring it from " "experimental (and therefore, evolving and unpredictable) to stable. 
More " "instances of `tenant_id` have been changed to `project_id`, in the cluster " "and job template APIs. `job_id` was changed to `job_template_id` in the job " "API. The newly-minted query string validation feature has been fixed to " "allow `show_progress` as a parameter on cluster GET; on a similar note some " "APIv2 endpoints which previously could be filtered by `hadoop_version` are " "now filtered by `plugin_version` instead. Also, the schema for cluster PATCH " "in APIv1.1 now no longer includes the key `update_keypair`; its prior " "inclusion was a mistake." msgid "Spark version 1.3.1 is deprecated." msgstr "Spark version 1.3.1 is deprecated." msgid "Spark workers are found by fqdn" msgstr "Spark workers are found by FQDN" msgid "Start using reno to manage release notes." msgstr "Start using Reno to manage release notes." msgid "Starting Ambari clusters on Centos 7 is fixed." msgstr "Starting Ambari clusters on Centos 7 is fixed." msgid "Stein Series Release Notes" msgstr "Stein Series Release Notes" msgid "Storm 1.1.0 is supported in Storm plugin." msgstr "Storm 1.1.0 is supported in Storm plugin." msgid "Storm version 0.9.2 is deprecated." msgstr "Storm version 0.9.2 is deprecated." msgid "Support deploy hadoop 2.7.5 with vanilla plugin." msgstr "Support deploy Hadoop 2.7.5 with vanilla plugin." msgid "Support deploy hadoop 2.8.2 with vanilla plugin." msgstr "Support deploy Hadoop 2.8.2 with vanilla plugin." msgid "" "Support for nova-network is removed, reflective of its removal from nova " "itself and from python-novaclient. use_neutron=False is unsupported." msgstr "" "Support for Nova-Network is removed, reflective of its removal from Nova " "itself and from python-novaclient. use_neutron=False is unsupported." msgid "Support of HDP 2.0.6 plugin was removed. Use Ambari plugin instead." msgstr "Support of HDP 2.0.6 plugin was removed. Use Ambari plugin instead." msgid "" "The 'enable' option of the 'oslo_messaging_notifications' section has been " "removed. To enable notifications now please specify the 'driver' option in " "the same section." msgstr "" "The 'enable' option of the 'oslo_messaging_notifications' section has been " "removed. To enable notifications now please specify the 'driver' option in " "the same section." msgid "" "The HDP 2.0.6 plugin is deprecated in Mitaka release and will be removed in " "Newton release. Please, use the Ambari 2.3 instead." msgstr "" "The HDP 2.0.6 plugin is deprecated in Mitaka release and will be removed in " "Newton release. Please, use the Ambari 2.3 instead." msgid "" "The URL of an S3 data source may have `s3://` or `s3a://`, equivalently." msgstr "" "The URL of an S3 data source may have `s3://` or `s3a://`, equivalently." msgid "" "The `sahara-all` entry point is now deprecated. Please use the sahara-api " "and sahara-engine entry points instead." msgstr "" "The `sahara-all` entry point is now deprecated. Please use the Sahara-API " "and sahara-engine entry points instead." msgid "" "The ability to force delete clusters is exposed in Sahara APIv2. The Heat " "service must support Stack Abandon for force delete to function properly." msgstr "" "The ability to force delete clusters is exposed in Sahara APIv2. The Heat " "service must support Stack Abandon for force delete to function properly." msgid "" "The behavior of force deletion of clusters (APIv2) has changed. Stack-" "abandon is no longer used. The response from the force-delete API call now " "includes the name of the stack which had underlain that deleted cluster." 
msgstr "" "The behaviour of force deletion of clusters (APIv2) has changed. Stack-" "abandon is no longer used. The response from the force-delete API call now " "includes the name of the stack which had underlain that deleted cluster." msgid "" "The command hdfs fs has been deprecated in favor of hdfs fs. This fixes will " "allow the use of Hbase service." msgstr "" "The command hdfs fs has been deprecated in favour of hdfs fs. This fixes " "will allow the use of Hbase service." msgid "" "The custom admin_user_domain_name and admin_project_domain_name " "configuration options have been removed; they are provided by " "keystone_authtoken as user_domain_name and project_domain_name respectively." msgstr "" "The custom admin_user_domain_name and admin_project_domain_name " "configuration options have been removed; they are provided by " "keystone_authtoken as user_domain_name and project_domain_name respectively." msgid "" "The default proxy role for Swift is now member instead of Member. Keystone " "now creates the former by default, even if the latter is recognized to be " "the same (case preserving)." msgstr "" "The default proxy role for Swift is now member instead of Member. Keystone " "now creates the former by default, even if the latter is recognised to be " "the same (case preserving)." msgid "" "The experimental APIv2 supports simultaneous creation of multiple clusters " "only through POST /v2/clusters (using the `count` parameter). The POST /v2/" "clusters/multiple endpoint has been removed." msgstr "" "The experimental APIv2 supports simultaneous creation of multiple clusters " "only through POST /v2/clusters (using the `count` parameter). The POST /v2/" "clusters/multiple endpoint has been removed." msgid "" "The following service versions were added to MapR 5.2.0 plugin - HBase 1.1 - " "Drill 1.6 - Mahout 0.11 0.12 - Spark 1.6.1 - Impala 2.5" msgstr "" "The following service versions were added to MapR 5.2.0 plugin - HBase 1.1 - " "Drill 1.6 - Mahout 0.11 0.12 - Spark 1.6.1 - Impala 2.5" msgid "" "The following service versions were added to MapR 5.2.0 plugin - Pig 0.16 - " "Spark 2.0.1 - Hue 3.10 - Drill 1.8, 1.9" msgstr "" "The following service versions were added to MapR 5.2.0 plugin - Pig 0.16 - " "Spark 2.0.1 - Hue 3.10 - Drill 1.8, 1.9" msgid "The presence of project ID in Sahara APIv1 paths is now optional." msgstr "The presence of project ID in Sahara APIv1 paths is now optional." msgid "" "The use of [keystone_authtoken] credentials for trust creation is now " "deprecated. Please use the new [trustee] config section. The options " "``username``, ``password``, ``project_name``, ``user_domain_name``, " "``project_domain_name``, and ``auth_url`` (with version) are obligatory " "within that section." msgstr "" "The use of [keystone_authtoken] credentials for trust creation is now " "deprecated. Please use the new [trustee] config section. The options " "``username``, ``password``, ``project_name``, ``user_domain_name``, " "``project_domain_name``, and ``auth_url`` (with version) are obligatory " "within that section." msgid "" "This feature allows the policy enforcement to be done in code thus " "facilitating better maintenance of the policy file. In code the default " "policies are set and the operator only needs to change the policy file if " "they wish to override the rule or role for a specific policy or operation. " "Also, a complete policy file can be generated using genconfig tool." 
msgstr "" "This feature allows the policy enforcement to be done in code thus " "facilitating better maintenance of the policy file. In code the default " "policies are set and the operator only needs to change the policy file if " "they wish to override the rule or role for a specific policy or operation. " "Also, a complete policy file can be generated using genconfig tool." msgid "" "This fixes the issue with NTP configuration where a prefered server provided " "by the user is added to the end of the file and the defaults are not " "deleted. Here we add the prefered server to the top of the file." msgstr "" "This fixes the issue with NTP configuration where a preferred server " "provided by the user is added to the end of the file and the defaults are " "not deleted. Here we add the preferred server to the top of the file." msgid "This patch adds ability to work with swift by using Keystone API v3" msgstr "This patch adds ability to work with swift by using Keystone API v3" msgid "Timeouts for ssh operations are configurable now" msgstr "Timeouts for SSH operations are configurable now" msgid "Train Series Release Notes" msgstr "Train Series Release Notes" msgid "Upgrade Notes" msgstr "Upgrade Notes" msgid "" "Use a new keypair to access to the running cluster when the cluster's " "keypair is deleted." msgstr "" "Use a new keypair to access to the running cluster when the cluster's " "keypair is deleted." msgid "" "Users of Sahara's APIv2 may request a microversion of that API, with " "\"OpenStack-API-Version: data-processing [version]\" in the request headers." msgstr "" "Users of Sahara's APIv2 may request a microversion of that API, with " "\"OpenStack-API-Version: data-processing [version]\" in the request headers." msgid "Ussuri Series Release Notes" msgstr "Ussuri Series Release Notes" msgid "Version 5.5.0 of Cloudera plugin is deprecated." msgstr "Version 5.5.0 of Cloudera plugin is deprecated." msgid "" "Versions 5, 5.3.0, 5.4.0 of Cloudera plugin are deprecated. It is no longer " "maintainted and supposed to be removed in P release." msgstr "" "Versions 5, 5.3.0, 5.4.0 of Cloudera plugin are deprecated. It is no longer " "maintained and supposed to be removed in P release." msgid "Versions 5.0.0 5.3.0 and 5.4.0 of Cloudera plugin are removed." msgstr "Versions 5.0.0 5.3.0 and 5.4.0 of Cloudera plugin are removed." msgid "Versions 5.5.0 and 5.7.0 of Cloudera plugin are declared as stable." msgstr "Versions 5.5.0 and 5.7.0 of Cloudera plugin are declared as stable." msgid "Wait condition feature can be disabled by the option." msgstr "Wait condition feature can be disabled by the option." msgid "" "We are assuring that ironic support was tested after latest updates to nova " "and sahara and it is fully functional." msgstr "" "We are assuring that Ironic support was tested after latest updates to Nova " "and Sahara and it is fully functional." msgid "We are deprecating CDH 5.7.0, Spark 1.6.0 and 2.1 and Storm 1.0.1." msgstr "We are deprecating CDH 5.7.0, Spark 1.6.0 and 2.1 and Storm 1.0.1." msgid "" "We are removing some plugins versions. Those are CDH 5.5.0, MapR 5.1.0, " "Spark 1.3.1 and Storm 0.9.2." msgstr "" "We are removing some plugins versions. Those are CDH 5.5.0, MapR 5.1.0, " "Spark 1.3.1 and Storm 0.9.2." msgid "" "With APIv2 we detected some inconsistencies on the policies. This patch " "updates the policy to fix those incosistencies." msgstr "" "With APIv2 we detected some inconsistencies on the policies. 
This patch " "updates the policy to fix those inconsistencies." msgid "" "migration from keystoneclient to keystoneauth is done for using auth " "features of keystone." msgstr "" "migration from keystoneclient to keystoneauth is done for using auth " "features of Keystone." ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.593891 sahara-16.0.0/releasenotes/source/locale/fr/0000775000175000017500000000000000000000000020717 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.681891 sahara-16.0.0/releasenotes/source/locale/fr/LC_MESSAGES/0000775000175000017500000000000000000000000022504 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po0000664000175000017500000000245100000000000025537 0ustar00zuulzuul00000000000000# Gérald LONLAS , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: sahara\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2018-08-09 19:13+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-10-22 06:34+0000\n" "Last-Translator: Gérald LONLAS \n" "Language-Team: French\n" "Language: fr\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n > 1)\n" msgid "3.0.1" msgstr "3.0.1" msgid "4.0.0" msgstr "4.0.0" msgid "4.0.1" msgstr "4.0.1" msgid "4.1.0" msgstr "4.1.0" msgid "5.0.0" msgstr "5.0.0" msgid "Bug Fixes" msgstr "Corrections de bugs" msgid "Current Series Release Notes" msgstr "Note de la release actuelle" msgid "Deprecation Notes" msgstr "Notes dépréciées " msgid "Known Issues" msgstr "Problèmes connus" msgid "Liberty Series Release Notes" msgstr "Note de release pour Liberty" msgid "Mitaka Series Release Notes" msgstr "Note de release pour Mitaka" msgid "New Features" msgstr "Nouvelles fonctionnalités" msgid "Newton Series Release Notes" msgstr "Note de release pour Newton" msgid "Other Notes" msgstr "Autres notes" msgid "Sahara Release Notes" msgstr "Note de release de Sahara" msgid "Upgrade Notes" msgstr "Notes de mises à jours" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.593891 sahara-16.0.0/releasenotes/source/locale/it/0000775000175000017500000000000000000000000020724 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.681891 sahara-16.0.0/releasenotes/source/locale/it/LC_MESSAGES/0000775000175000017500000000000000000000000022511 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/locale/it/LC_MESSAGES/releasenotes.po0000664000175000017500000004501000000000000025542 0ustar00zuulzuul00000000000000# Luigi Toscano , 2018. #zanata # Luigi Toscano , 2019. 
#zanata msgid "" msgstr "" "Project-Id-Version: sahara\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2020-04-23 21:25+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2019-07-02 10:35+0000\n" "Last-Translator: Luigi Toscano \n" "Language-Team: Italian\n" "Language: it\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "10.0.0" msgstr "10.0.0" msgid "3.0.1" msgstr "3.0.1" msgid "4.0.0" msgstr "4.0.0" msgid "4.0.1" msgstr "4.0.1" msgid "4.1.0" msgstr "4.1.0" msgid "5.0.0" msgstr "5.0.0" msgid "6.0.0" msgstr "6.0.0" msgid "7.0.0" msgstr "7.0.0" msgid "7.0.3" msgstr "7.0.3" msgid "8.0.0" msgstr "8.0.0" msgid "8.0.1" msgstr "8.0.1" msgid "8.0.2" msgstr "8.0.2" msgid "9.0.0" msgstr "9.0.0" msgid "9.0.1" msgstr "9.0.1" msgid "" "A few responses in the experimental (but nearly-stable) APIv2 have been " "tweaked. To be specific, the key `hadoop_version` has been replaced with " "`plugin_version`, the key `job` has been replaced with `job_template`, the " "key `job_execution` has been replaced with `job`, and the key `oozie_job_id` " "has been replaced with `engine_job_id`. In fact, these changes were all " "previously partially implemented, and are now completely implemented." msgstr "" "Alcune risposte delle APIv2 - sperimentali, ma quasi stabili - sono state " "rifinite. In particolare la chiave `hadoop_version` è stata sostituita da " "`plugin_version`, la chiave `job` è stata sostituita da `job_template`, la " "chiave `job_execution` è stata sostituita da `job`, e la chiave " "`oozie_job_id` è stata sostituita da `engine_job_ide`. In effetti questi " "cambiamenti erano già stati tutti parzialmente implementati, ed ora lo sono " "totalmente." msgid "Add Kafka to MapR plugin" msgstr "Aggiunto Kafka al plugin MapR" msgid "Add Sentry to MapR plugin" msgstr "Aggiunto Sentry al plugin MapR" msgid "Add ability of scheduling EDP jobs for sahara" msgstr "Aggiunta la capacità di pianificare job EDP in sahara" msgid "" "Add ability to automaticaly generate better configurations for Ambari " "cluster by using 'ALWAYS_APPLY' strategy" msgstr "" "Aggiunta la capacità di generare automaticamente configurazioni migliori per " "cluster Ambari usando a strategia 'ALWAYS_APPLY'" msgid "Add impala 2.2 to MapR plugin" msgstr "Aggiunto Impala 2.2 al plugin MapR" msgid "" "Add regular expression matching on search values for certain string fields " "of sahara objects. This applies to list operations through the REST API and " "therefore applies to the dashboard and sahara client as well. Closes bug " "1503345." msgstr "" "Aggiunta la corrispendenza in base ad un'espressione regolare per i valori " "di ricerca su determinati campi degli oggetti di sahara. Questo si applica " "alle operazioni di elenco (list) in tutte le API REST e si applica quindi " "anche alla dashboard ed a saharaclient. Chiude il bug 1503345." msgid "Added additional filter to volume_type check, closed bug 1508017" msgstr "" "Aggiunto un ulteriore filtro al controllo volume_type, chiudendo il bug " "1508017" msgid "Added event log for HDP plugin" msgstr "Aggiunto il registro degli eventi per il plugin HDP" msgid "" "Added integration of Designate for hostname resolution through dns servers" msgstr "" "Aggiunta l'integrazione con Designate per la risoluzione dei nomi degli host " "tramite server DNS" msgid "Added new tool ``sahara-status upgrade check``." msgstr "Aggiunto il nuovo strumento ``sahara-status upgrade check``." 
msgid "Added rack awareness feature for CDH 5.5 and CDH 5.7" msgstr "Aggiunta la funzionalità di conoscenza del rack per CDH 5.5 e CDH 5.7" msgid "Added rack awareness feature for HDP plugin" msgstr "Aggiunta la funzionalità di conoscenza del rack per il plugin HDP" msgid "" "Added support of running sahara-api as wsgi application. Use 'sahara-wsgi-" "api' command for use this feature." msgstr "" "Aggiunto il supporto per eseguire sahara-api come applicazione wsgi. La " "nuova funzione si può usare tramite il comando 'sahara-wsgi-api'." msgid "Adding Spark 2.3 to supported plugins list." msgstr "Aggiunto Spark 2.3 all'elenco dei plugin supportati." msgid "Adding Spark version 2.2 to Sahara." msgstr "Aggiunto Spark versione 2.2 a Sahara." msgid "" "Adding ability to create and validate MapR 5.2.0 images using the new image " "gen tool." msgstr "" "Aggiunta la capacità di creare e validare immagini MapR 5.2.0 usando il " "nuovo strumento di generazione delle immagini." msgid "" "Adding new versions of Storm, 1.2.0 and 1.2.1. Both will exist under the " "same tag 1.2." msgstr "" "Aggiunte nuove versioni di Storm, 1.2.0 e 1.2.1. Entrambe sono disponibili " "tramite lo stesso tag 1.2." msgid "Adding support to CDH 5.13.0 in CDH plugin." msgstr "Aggiunto il supporto per CDH 5.13.0 nel plugin CDH." msgid "" "Adding the ability to boot a Sahara cluster from volumes instead of images." msgstr "" "Aggiunta la capacità di effettuare il boot di un cluster Sahara da volumi " "invece che da immagini." msgid "" "Adding the ability to change default timeout parameter for ambari agent " "package installation" msgstr "" "Aggiunta la capacità di cambiare il parametro predefinito di timeout per " "l'installazione del pacchetto dell'agente Ambari" msgid "Adding the ability to create Ambari 2.6 images on sahara-image-pack" msgstr "Aggiunta a sahara-image-pack la capacità di creare immagini Ambari 2.6" msgid "" "After decommissioning hosts all assoicated configs groups will be removed in " "ambari plugin." msgstr "" "Dopo la dismissione di un host, tutti i gruppi di configurazione associati " "saranno rimossi dal plugin Ambari" msgid "" "An EDP data source may reference a file stored in a S3-like object store." msgstr "" "Una fonte dati EDP può fare riferimento ad un file memorizzato in un gestore " "di oggetti con interfaccia S3." msgid "Bug Fixes" msgstr "Correzione di bug" msgid "CDH 5.11.0 is supported in CDH plugin." msgstr "CDH 5.11.0 è supportato nel plugin CDH." msgid "CDH 5.5.0 is supported in CDH plugin." msgstr "CDH 5.5.0 è supportato nel plugin CDH." msgid "CDH 5.7 is supported in CDH plugin." msgstr "CDH 5.7 è supportato nel plugin CDH." msgid "CDH 5.9.0 is supported in CDH plugin." msgstr "CDH 5.9.0 è supportato nel plugin CDH." msgid "" "Cluster with 'is_protected' field can be created now, closed bug 1510929" msgstr "" "Adesso possono essere creati dei cluster con il campo 'is_protected', " "chiudendo il bug 1510929" msgid "" "Convert to cluster template feature is no longer supported by all plugins." msgstr "" "La funzionalità di conversione in un modello di cluster non è più supportata " "da tutti i plugin." msgid "Correctly configure Spark with Hive and HBase on different nodes." msgstr "Configura correttamente Spark con Hive e HBase su nodi diversi." 
msgid "Current Series Release Notes" msgstr "Note di rilascio per la serie attuale" msgid "Custom health check is added to MapR plugin" msgstr "" "Il controllo personalizzato dello stato di salute è stato aggiunto al plugin " "MapR" msgid "Deprecation Notes" msgstr "Note relative alle deprecazioni" msgid "" "Enables the creation and validation of Ambari 2.4 images using the new image " "generation process where libguestfs replaces the use of DIB." msgstr "" "Permette la creazione e la validazione di immagini Ambari 2.4 usando il " "nuovo processo di generazione delle immagini in cui libguestfs sostituisce " "l'uso di DIB." msgid "" "Enables the creation and validation of CDH 5.11.0 images using the new image " "generation process where libguestfs replaces the use of DIB." msgstr "" "Permette la creazione e la validazione di immagini CDH 5.11.0 usando il " "nuovo processo di generazione delle immagini in cui libguestfs sostituisce " "l'uso di DIB." msgid "" "Enables the creation and validation of CDH 5.7.0 images using the new image " "generation process where libguestfs replaces the use of DIB." msgstr "" "Permette la creazione e la validazione di immagini CDH 5.7.0 usando il nuovo " "processo di generazione delle immagini in cui libguestfs sostituisce l'uso " "di DIB." msgid "" "Enables the creation and validation of CDH 5.9.0 images using the new image " "generation process where libguestfs replaces the use of DIB." msgstr "" "Permette la creazione e la validazione di immagini CDH 5.9.0 usando il nuovo " "processo di generazione delle immagini in cui libguestfs sostituisce l'uso " "di DIB." msgid "Fix Hue intergation with Spark and Hive on different nodes" msgstr "Corretta l'integrazione di Hue con Spark e Hive su nodi diversi" msgid "Fix creating Hive database schema automatically." msgstr "Corretta la creazione automatica di schemi di database di Hive" msgid "Fix unavailable MCS link." msgstr "Corretto il collegamento MCS non disponibile" msgid "Fix visualization of MapR versions when running on python 3" msgstr "" "Corretta la visualizzazione delle versioni di MapR quando viene usato python " "3" msgid "Fixed api_insecure handling in sessions. Closed bug 1539498." msgstr "" "Corretta la gestione di api_insecure nelle sessioni. Risolve il bug 1539498." msgid "Fixed arguments list in latest cdh plugin" msgstr "Corretto l'elenco degli argomenti nel plugin CDH più recente" msgid "Fixed client tempest tests" msgstr "Corretti i test tempest per il client" msgid "Fixed issues with using Swift as an output datasource." msgstr "Corretti alcuni problemi quando si usa Swift come fonte dati d'uscita." msgid "Fixed launching Hive jobs in Ambari plugin." msgstr "Corretta l'esecuzione di job Hive nel plugin Ambari." msgid "Kafka was added in CDH 5.5 and CDH 5.7" msgstr "Kafka è stato aggiunto a CDH 5.5 e CDH 5.7" msgid "Known Issues" msgstr "Problemi noti" msgid "Liberty Series Release Notes" msgstr "Note di rilascio per la serie Liberty" msgid "MaR 5.2.0 is supported in MapR plugin." msgstr "MapR 5.2.0 è supportato nel plugin MapR" msgid "MapR 5.1.0.mrv2 is now Enabled." msgstr "MapR 5.1.0.mrv2 è ora abilitato." msgid "" "MapR repositories now can be configured in general section of cluster " "template configs" msgstr "" "I repository MapR possono essere configurati nella sezione generale della " "configurazione dei template di cluster." 
msgid "Migration from novaclient.v2.images to glanceclient" msgstr "Migrazione da novaclient.v2.images a glanceclient" msgid "Mitaka Series Release Notes" msgstr "Note di rilascio per la serie Mitaka" msgid "Move notifications options into oslo_messaging_notifications" msgstr "Opzioni delle notifiche spostate in oslo_messaging_notifications" msgid "" "Neutron is used by default now (use_neutron=True). Nova-network is not " "functionaly for most use cases starting from Ocata." msgstr "" "Neutron viene ora usato in modo predefinito (use_neutron=True). Nova-network " "non funziona per la maggior parte dei casi d'uso a partire da Ocata." msgid "New Features" msgstr "Nuove funzionalità" msgid "" "New framework for ``sahara-status upgrade check`` command is added. This " "framework allows adding various checks which can be run before a Sahara " "upgrade to ensure if the upgrade can be performed safely." msgstr "" "È stata aggiunta una nuova infrastruttura per supportare il comando ``sahara-" "status upgrade check``. Questa infrastruttura permette l'aggiunta di vari " "controlli che possono essere eseguiti prima di un aggiornamento di Sahara " "per assicurarsi che l'aggiornamento possa essere eseguito in sicurezza." msgid "Newton Series Release Notes" msgstr "Note di rilascio per la serie Newton" msgid "Ocata Series Release Notes" msgstr "Note di rilascio per la serie Ocata" msgid "Open ports for Kafka in Ambari plugin, closed bug 1512661" msgstr "Apertura delle porte per Kafka nel plugin Ambari, bug 1512661 chiuso" msgid "" "Operator can now use new CLI tool ``sahara-status upgrade check`` to check " "if Sahara deployment can be safely upgraded from N-1 to N release." msgstr "" "Gli operatori possono usare il nuovo strumento a riga di comando ``sahara-" "status upgrade check`` per verificare che l'intallazione di Sahara può " "essere aggiornata in sicurezza dalla versione N-1 alla N." msgid "Other Notes" msgstr "Altre note" msgid "Pike Series Release Notes" msgstr "Note di rilascio per la serie Pike" msgid "Prelude" msgstr "Preludio" msgid "Queens Series Release Notes" msgstr "Note di rilascio per la serie Queens" msgid "" "Remove Impala 1.4.1, HBase 0.98.9, Hive 1.0, Hue 3.7 support in MapR 5.1.0 " "plugin" msgstr "" "Rimozione del supporto di Impala 1.4.1, HBase 0.98.9, Hive 1.0, Hue 3.7 dal " "plugin MapR 5.1.0." msgid "Removed support for the MapR 5.0.0 plugin." msgstr "Rimosso il supporto per il plugin MapR 5.0.0." msgid "Removed support for the Spark 1.0.0 plugin." msgstr "Rimosso il supporto per il plugin Spark 1.0.0." msgid "Removed support of Vanilla 2.6.0 plugin." msgstr "Rimosso il supporto per il plugin Vanilla 2.6.0." msgid "Removed unneeded volume serialization" msgstr "Rimossa la serializzazione non necessaria dei volumi" msgid "Rocky Series Release Notes" msgstr "Note di rilascio per la serie Queens" msgid "Sahara Release Notes" msgstr "Note di rilascio di Sahara" msgid "Sahara's APIv2 is now considered stable, and no longer experimental." msgstr "L'APIv2 di Sahara è ora considerata stabile e non più sperimentale." msgid "" "Sample configuration files previously installed in share/sahara will now be " "installed into etc/sahara instead." msgstr "" "I file di configurazione di Sahara installati in precedenza in share/sahara " "saranno installati invece in etc/sahara." msgid "Spark version 1.3.1 is deprecated." msgstr "La versione 1.3.1 di Spark è deprecata." 
msgid "Spark workers are found by fqdn" msgstr "I nodi worker di Spark sono individuati tramite FQDN" msgid "Start using reno to manage release notes." msgstr "Inizio dell'uso di reno per gestire le note di rilascio." msgid "Starting Ambari clusters on Centos 7 is fixed." msgstr "L'avvio di cluster Ambari su CentOS 7 è stato corretto." msgid "Stein Series Release Notes" msgstr "Note di rilascio per la serie Stein" msgid "Storm 1.1.0 is supported in Storm plugin." msgstr "Storm 1.1.0 è supportato dal plugin Storm." msgid "Support deploy hadoop 2.7.5 with vanilla plugin." msgstr "" "Supporto per la creazione di cluster con hadoop 2.7.5 nel plugin vanilla" msgid "Support deploy hadoop 2.8.2 with vanilla plugin." msgstr "" "Supporto per la creazione di cluster con hadoop 2.8.2 nel plugin vanilla" msgid "" "Support for nova-network is removed, reflective of its removal from nova " "itself and from python-novaclient. use_neutron=False is unsupported." msgstr "" "Il supporto a nova-network è stato rimosso a seguito della sua rimozione da " "nova stesso e da python-novaclient. use_neutron=False non è più supportato." msgid "Support of HDP 2.0.6 plugin was removed. Use Ambari plugin instead." msgstr "" "È stato rimosso il supporto per il plugin HDP 2.0.6. Al suo posto va usato " "il plugin Ambari." msgid "" "The URL of an S3 data source may have `s3://` or `s3a://`, equivalently." msgstr "" "L'URL di una fonte dati S3 può contenere `s3://` o `s3a://`; sono " "equivalenti." msgid "" "The ability to force delete clusters is exposed in Sahara APIv2. The Heat " "service must support Stack Abandon for force delete to function properly." msgstr "" "La possibilità di eliminare forzatamente i cluster è stata resa disponibile " "nell'APIv2 di Sahara. Il servizio Heat deve supportare la funzione di " "abbandono dello stack (Stack Abandon) affinché la rimozione forzata funzioni " "correttamente." msgid "" "The behavior of force deletion of clusters (APIv2) has changed. Stack-" "abandon is no longer used. The response from the force-delete API call now " "includes the name of the stack which had underlain that deleted cluster." msgstr "" "Il comportamento della rimozione forzata dei cluster (APIv2) è cambiato. La " "funzione di abbandono dei cluster non è più usata. La risposta alla chiamata " "dell'API force-delete adesso include il nome dello stack che era alla base " "del cluster eliminato." msgid "" "The experimental APIv2 supports simultaneous creation of multiple clusters " "only through POST /v2/clusters (using the `count` parameter). The POST /v2/" "clusters/multiple endpoint has been removed." msgstr "" "APIv2, la versione sperimentale delle API, supporta la creazione simultanea " "di più cluster esclusivamente tramite POST /v2/clusters (usando il parametro " "`count`). L'endpoint POST /v2/clusters/multiple endpoint è stato rimosso." msgid "" "The following service versions were added to MapR 5.2.0 plugin - HBase 1.1 - " "Drill 1.6 - Mahout 0.11 0.12 - Spark 1.6.1 - Impala 2.5" msgstr "" "Sono stati aggiunte le seguenti versioni di servizi al plugin MapR 5.2.0 - " "HBase 1.1 - Drill 1.6 - Mahout 0.11 0.12 - Spark 1.6.1 - Impala 2.5" msgid "" "The following service versions were added to MapR 5.2.0 plugin - Pig 0.16 - " "Spark 2.0.1 - Hue 3.10 - Drill 1.8, 1.9" msgstr "" "Sono stati aggiunte le seguenti versioni di servizi al plugin MapR 5.2.0 - " "Pig 0.16 - Spark 2.0.1 - Hue 3.10 - Drill 1.8, 1.9" msgid "The presence of project ID in Sahara APIv1 paths is now optional." 
msgstr "" "La presenza dell'ID del progetto nei percorsi della APIv1 di Sahara è ora " "opzionale." msgid "This patch adds ability to work with swift by using Keystone API v3" msgstr "Questa patch permette di usare swift usando l'API v3 di Keystone" msgid "Upgrade Notes" msgstr "Note di aggiornamento" msgid "" "Users of Sahara's APIv2 may request a microversion of that API, with " "\"OpenStack-API-Version: data-processing [version]\" in the request headers." msgstr "" "Gli utenti dell'APIv2 di Sahara possono richiedere una specifica " "microversione dell'API tramite \"OpenStack-API-Version: data-processing " "[versione]\" nelle intestazioni della richiesta." msgid "Versions 5.0.0 5.3.0 and 5.4.0 of Cloudera plugin are removed." msgstr "" "Le versioni 5.0.0, 5.3.0 e 5.4.0 del plugin Cloudera sono state rimosse." msgid "Versions 5.5.0 and 5.7.0 of Cloudera plugin are declared as stable." msgstr "" "Le versioni 5.5.0 e 5.7.0 del plugin Cloudera sono state dichiarate stabili." msgid "" "We are removing some plugins versions. Those are CDH 5.5.0, MapR 5.1.0, " "Spark 1.3.1 and Storm 0.9.2." msgstr "" "Abbiamo rimosso alcune versioni di plugin. Si tratta di CDH 5.5.0, MapR " "5.1.0, Spark 1.3.1 e Storm 0.9.2." msgid "" "migration from keystoneclient to keystoneauth is done for using auth " "features of keystone." msgstr "" "la migrazione da keystoneclient a keystoneauth è stata effettuata per usare " "le funzionalità di autenticazione di keystone." ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/mitaka.rst0000664000175000017500000000023200000000000021046 0ustar00zuulzuul00000000000000=================================== Mitaka Series Release Notes =================================== .. release-notes:: :branch: origin/stable/mitaka ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/newton.rst0000664000175000017500000000023200000000000021112 0ustar00zuulzuul00000000000000=================================== Newton Series Release Notes =================================== .. release-notes:: :branch: origin/stable/newton ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/ocata.rst0000664000175000017500000000023000000000000020665 0ustar00zuulzuul00000000000000=================================== Ocata Series Release Notes =================================== .. release-notes:: :branch: origin/stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/pike.rst0000664000175000017500000000021700000000000020533 0ustar00zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/queens.rst0000664000175000017500000000022300000000000021100 0ustar00zuulzuul00000000000000=================================== Queens Series Release Notes =================================== .. 
release-notes:: :branch: stable/queens ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/rocky.rst0000664000175000017500000000022100000000000020725 0ustar00zuulzuul00000000000000=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/stein.rst0000664000175000017500000000022100000000000020720 0ustar00zuulzuul00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/train.rst0000664000175000017500000000017600000000000020724 0ustar00zuulzuul00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/unreleased.rst0000664000175000017500000000016000000000000021727 0ustar00zuulzuul00000000000000============================== Current Series Release Notes ============================== .. release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/ussuri.rst0000664000175000017500000000020200000000000021127 0ustar00zuulzuul00000000000000=========================== Ussuri Series Release Notes =========================== .. release-notes:: :branch: stable/ussuri ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/victoria.rst0000664000175000017500000000021200000000000021416 0ustar00zuulzuul00000000000000============================= Victoria Series Release Notes ============================= .. release-notes:: :branch: stable/victoria ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/wallaby.rst0000664000175000017500000000020600000000000021234 0ustar00zuulzuul00000000000000============================ Wallaby Series Release Notes ============================ .. release-notes:: :branch: stable/wallaby ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/releasenotes/source/xena.rst0000664000175000017500000000017200000000000020536 0ustar00zuulzuul00000000000000========================= Xena Series Release Notes ========================= .. release-notes:: :branch: stable/xena ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/requirements.txt0000664000175000017500000000303700000000000016347 0ustar00zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
pbr!=2.1.0,>=2.0.0 # Apache-2.0 alembic>=0.9.6 # MIT botocore>=1.5.1 # Apache-2.0 castellan>=0.16.0 # Apache-2.0 eventlet>=0.26.0 # MIT Flask>=1.0.2 # BSD iso8601>=0.1.11 # MIT Jinja2>=2.10 # BSD License (3 clause) jsonschema>=3.2.0 # MIT keystoneauth1>=3.4.0 # Apache-2.0 keystonemiddleware>=4.17.0 # Apache-2.0 microversion-parse>=0.2.1 # Apache-2.0 oslo.config>=6.8.0 # Apache-2.0 oslo.concurrency>=3.26.0 # Apache-2.0 oslo.context>=2.22.0 # Apache-2.0 oslo.db>=6.0.0 # Apache-2.0 oslo.i18n>=3.15.3 # Apache-2.0 oslo.log>=3.36.0 # Apache-2.0 oslo.messaging>=10.2.0 # Apache-2.0 oslo.middleware>=3.31.0 # Apache-2.0 oslo.policy>=3.6.0 # Apache-2.0 oslo.rootwrap>=5.8.0 # Apache-2.0 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0 oslo.service>=1.31.0 # Apache-2.0 oslo.upgradecheck>=1.3.0 # Apache-2.0 oslo.utils>=4.5.0 # Apache-2.0 paramiko>=2.7.1 # LGPLv2.1+ requests>=2.23.0 # Apache-2.0 python-cinderclient!=4.0.0,>=3.3.0 # Apache-2.0 python-keystoneclient>=3.8.0 # Apache-2.0 python-manilaclient>=1.16.0 # Apache-2.0 python-novaclient>=9.1.0 # Apache-2.0 python-swiftclient>=3.2.0 # Apache-2.0 python-neutronclient>=6.7.0 # Apache-2.0 python-heatclient>=1.10.0 # Apache-2.0 python-glanceclient>=2.8.0 # Apache-2.0 stevedore>=1.20.0 # Apache-2.0 SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT tooz>=1.58.0 # Apache-2.0 WebOb>=1.7.1 # MIT ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.593891 sahara-16.0.0/roles/0000775000175000017500000000000000000000000014204 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.681891 sahara-16.0.0/roles/build-sahara-images-cli/0000775000175000017500000000000000000000000020550 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/roles/build-sahara-images-cli/README.rst0000664000175000017500000000043000000000000022234 0ustar00zuulzuul00000000000000Build Sahara Images with CLI **Role Variables** .. zuul:rolevar:: sahara_build_directory :default: /var/tmp/sahara-image-build The base directory used for the build process. .. zuul:rolevar:: sahara_plugin :default: vanilla The plugin whose images will be built. 
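**Example Usage**

A minimal sketch of how this role might be applied from a regular playbook; the play name, host group, and chosen values below are illustrative assumptions, and only the two role variables documented above are defined by the role.

.. code-block:: yaml

   # Hypothetical playbook (illustrative host group): build the images for
   # the Spark plugin, overriding the documented role variables.
   - name: Build Sahara images with the CLI tool
     hosts: image-builder
     roles:
       - role: build-sahara-images-cli
         vars:
           sahara_plugin: spark
           sahara_build_directory: /var/tmp/sahara-image-build

Note that the role's defaults also derive the Sahara source checkout location (``sahara_src_dir``) from Zuul variables, so that variable may need to be set explicitly when the role is used outside a Zuul job.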
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.681891 sahara-16.0.0/roles/build-sahara-images-cli/defaults/0000775000175000017500000000000000000000000022357 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/roles/build-sahara-images-cli/defaults/main.yaml0000664000175000017500000000026400000000000024171 0ustar00zuulzuul00000000000000--- sahara_src_dir: "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/openstack/sahara'].src_dir }}" sahara_build_directory: /var/tmp/sahara-image-build sahara_plugin: vanilla ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.685891 sahara-16.0.0/roles/build-sahara-images-cli/tasks/0000775000175000017500000000000000000000000021675 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/roles/build-sahara-images-cli/tasks/main.yaml0000664000175000017500000000056400000000000023512 0ustar00zuulzuul00000000000000- name: Installs needed plugins to build its images command: tox -e images -- pip install {{ ansible_user_dir }}/src/opendev.org/openstack/sahara-plugin-{{ sahara_plugin }} args: chdir: "{{ sahara_src_dir }}" - name: Build all the images for the selected plugin command: ./tools/gate/build-images "{{ sahara_plugin }}" args: chdir: "{{ sahara_src_dir }}" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.685891 sahara-16.0.0/sahara/0000775000175000017500000000000000000000000014317 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/__init__.py0000664000175000017500000000000000000000000016416 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.685891 sahara-16.0.0/sahara/api/0000775000175000017500000000000000000000000015070 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/__init__.py0000664000175000017500000000000000000000000017167 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/acl.py0000664000175000017500000000324200000000000016202 0ustar00zuulzuul00000000000000# # Copyright (c) 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Policy Engine For Sahara""" import functools from oslo_config import cfg from oslo_policy import opts from oslo_policy import policy from sahara.common import policies from sahara import context from sahara import exceptions ENFORCER = None # TODO(gmann): Remove setting the default value of config policy_file # once oslo_policy change the default value to 'policy.yaml'. # https://opendev.org/openstack/oslo.policy/src/commit/d8534850d9238e85ae0ea55bf2ac8583681fdb2b/oslo_policy/opts.py#L49 DEFAULT_POLICY_FILE = 'policy.yaml' opts.set_defaults(cfg.CONF, DEFAULT_POLICY_FILE) def setup_policy(): global ENFORCER ENFORCER = policy.Enforcer(cfg.CONF) ENFORCER.register_defaults(policies.list_rules()) def enforce(rule): def decorator(func): @functools.wraps(func) def handler(*args, **kwargs): ctx = context.ctx() ENFORCER.authorize(rule, {}, ctx.to_dict(), do_raise=True, exc=exceptions.Forbidden) return func(*args, **kwargs) return handler return decorator ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/base.py0000664000175000017500000000140100000000000016350 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.i18n import _ import sahara.utils.api as u def not_implemented(): return u.internal_error( 501, NotImplementedError(_("This API operation isn't implemented"))) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/microversion.py0000664000175000017500000000201700000000000020161 0ustar00zuulzuul00000000000000# Copyright 2018 OpenStack Contributors # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
API_VERSIONS = ["2.0"] MIN_API_VERSION = API_VERSIONS[0] MAX_API_VERSION = API_VERSIONS[-1] LATEST = "latest" VERSION_STRING_REGEX = r"^([1-9]\d*).([1-9]\d*|0)$" OPENSTACK_API_VERSION_HEADER = "OpenStack-API-Version" VARY_HEADER = "Vary" SAHARA_SERVICE_TYPE = "data-processing" BAD_REQUEST_STATUS_CODE = 400 BAD_REQUEST_STATUS_NAME = "BAD_REQUEST" NOT_ACCEPTABLE_STATUS_CODE = 406 NOT_ACCEPTABLE_STATUS_NAME = "NOT_ACCEPTABLE" ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.689891 sahara-16.0.0/sahara/api/middleware/0000775000175000017500000000000000000000000017205 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/middleware/__init__.py0000664000175000017500000000000000000000000021304 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/middleware/auth_valid.py0000664000175000017500000000426600000000000021707 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from oslo_middleware import base from oslo_utils import strutils from oslo_utils import uuidutils import webob import webob.exc as ex from sahara.i18n import _ LOG = logging.getLogger(__name__) class AuthValidator(base.Middleware): """Handles token auth results and tenants.""" @webob.dec.wsgify def __call__(self, req): """Ensures that tenants in url and token are equal. Handle incoming request by checking tenant info prom the headers and url ({tenant_id} url attribute). Pass request downstream on success. Reject request if tenant_id from headers not equals to tenant_id from url. """ token_tenant = req.environ.get("HTTP_X_TENANT_ID") if not token_tenant: LOG.warning("Can't get tenant_id from env") raise ex.HTTPServiceUnavailable() path = req.environ['PATH_INFO'] if path != '/': try: version, possibly_url_tenant, rest = ( strutils.split_path(path, 2, 3, True) ) except ValueError: LOG.warning("Incorrect path: {path}".format(path=path)) raise ex.HTTPNotFound(_("Incorrect path")) if uuidutils.is_uuid_like(possibly_url_tenant): url_tenant = possibly_url_tenant if token_tenant != url_tenant: LOG.debug("Unauthorized: token tenant != requested tenant") raise ex.HTTPUnauthorized( _('Token tenant != requested tenant')) return self.application ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/middleware/sahara_middleware.py0000664000175000017500000000512500000000000023216 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import flask from oslo_config import cfg import six from werkzeug import exceptions as werkzeug_exceptions from sahara.api import v10 as api_v10 from sahara.api import v11 as api_v11 from sahara.api import v2 as api_v2 from sahara import context from sahara.utils import api as api_utils CONF = cfg.CONF def build_app(): """App builder (wsgi). Entry point for Sahara REST API server """ app = flask.Flask('sahara.api') @app.teardown_request def teardown_request(_ex=None): context.set_ctx(None) app.register_blueprint(api_v10.rest, url_prefix='/v1.0') app.register_blueprint(api_v10.rest, url_prefix='/v1.1') app.register_blueprint(api_v11.rest, url_prefix='/v1.1') def make_json_error(ex): status_code = (ex.code if isinstance(ex, werkzeug_exceptions.HTTPException) else 500) description = (ex.description if isinstance(ex, werkzeug_exceptions.HTTPException) else str(ex)) return api_utils.render({'error': status_code, 'error_message': description}, status=status_code) for code in six.iterkeys(werkzeug_exceptions.default_exceptions): app.register_error_handler(code, make_json_error) return app def build_v2_app(): """App builder (wsgi). Entry point for Experimental V2 Sahara REST API server """ app = build_app() api_v2.register_blueprints(app, url_prefix='/v2') return app class Router(object): def __call__(self, environ, response): return self.app(environ, response) @classmethod def factory(cls, global_config, **local_config): cls.app = build_app() return cls(**local_config) class RouterV2(object): def __call__(self, environ, response): return self.app(environ, response) @classmethod def factory(cls, global_config, **local_config): cls.app = build_v2_app() return cls(**local_config) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/middleware/version_discovery.py0000664000175000017500000000504200000000000023334 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Massachusetts Open Cloud # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import re from oslo_middleware import base from oslo_serialization import jsonutils import webob import webob.dec from sahara.api import microversion as mv class VersionResponseMiddlewareV1(base.Middleware): @webob.dec.wsgify def __call__(self, req): """Respond to a request for all Sahara API versions.""" path = req.environ['PATH_INFO'] if re.match(r"^/*$", path): response = webob.Response(request=req, status=300, content_type="application/json") response.body = jsonutils.dump_as_bytes(self._get_versions(req)) return response else: return self.application def _get_versions(self, req): """Populate the version response with APIv1 stuff.""" version_response = { "versions": [ {"id": "v1.0", "status": "SUPPORTED", "links": self._get_links("1.0", req) }, {"id": "v1.1", "status": "CURRENT", "links": self._get_links("1.1", req) } ] } return version_response @staticmethod def _get_links(version, req): href = "%s/v%s/" % (req.host_url, version) return [{"rel": "self", "href": href}] class VersionResponseMiddlewareV2(VersionResponseMiddlewareV1): def _get_versions(self, req): """Populate the version response with APIv1 and APIv2 stuff.""" version_response = ( super(VersionResponseMiddlewareV2, self)._get_versions(req) ) version_response["versions"][1]["status"] = "SUPPORTED" # v1.1 version_response["versions"].append( {"id": "v2", "status": "CURRENT", "links": self._get_links("2", req), "min_version": mv.MIN_API_VERSION, "max_version": mv.MAX_API_VERSION } ) return version_response ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/v10.py0000664000175000017500000002767500000000000016071 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import six from sahara.api import acl from sahara.service.api import v10 as api from sahara.service import validation as v from sahara.service.validations import cluster_template_schema as ct_schema from sahara.service.validations import cluster_templates as v_ct from sahara.service.validations import clusters as v_c from sahara.service.validations import clusters_scaling as v_c_s from sahara.service.validations import clusters_schema as v_c_schema from sahara.service.validations import images as v_images from sahara.service.validations import node_group_template_schema as ngt_schema from sahara.service.validations import node_group_templates as v_ngt from sahara.service.validations import plugins as v_p import sahara.utils.api as u rest = u.Rest('v10', __name__) # Cluster ops @rest.get('/clusters') @acl.enforce("data-processing:clusters:get_all") @v.check_exists(api.get_cluster, 'marker') @v.validate(None, v.validate_pagination_limit, v.validate_sorting_clusters) def clusters_list(): result = api.get_clusters(**u.get_request_args().to_dict()) return u.render(res=result, name='clusters') @rest.post('/clusters') @acl.enforce("data-processing:clusters:create") @v.validate(v_c_schema.CLUSTER_SCHEMA, v_c.check_cluster_create) def clusters_create(data): return u.render(api.create_cluster(data).to_wrapped_dict()) @rest.post('/clusters/multiple') @acl.enforce("data-processing:clusters:create") @v.validate( v_c_schema.MULTIPLE_CLUSTER_SCHEMA, v_c.check_multiple_clusters_create) def clusters_create_multiple(data): return u.render(api.create_multiple_clusters(data)) @rest.put('/clusters/') @acl.enforce("data-processing:clusters:scale") @v.check_exists(api.get_cluster, 'cluster_id') @v.validate(v_c_schema.CLUSTER_SCALING_SCHEMA, v_c_s.check_cluster_scaling) def clusters_scale(cluster_id, data): return u.to_wrapped_dict(api.scale_cluster, cluster_id, data) @rest.get('/clusters/') @acl.enforce("data-processing:clusters:get") @v.check_exists(api.get_cluster, 'cluster_id') def clusters_get(cluster_id): data = u.get_request_args() show_events = six.text_type( data.get('show_progress', 'false')).lower() == 'true' return u.to_wrapped_dict(api.get_cluster, cluster_id, show_events) @rest.patch('/clusters/') @acl.enforce("data-processing:clusters:modify") @v.check_exists(api.get_cluster, 'cluster_id') @v.validate(v_c_schema.CLUSTER_UPDATE_SCHEMA, v_c.check_cluster_update) def clusters_update(cluster_id, data): return u.to_wrapped_dict(api.update_cluster, cluster_id, data) @rest.delete('/clusters/') @acl.enforce("data-processing:clusters:delete") @v.check_exists(api.get_cluster, 'cluster_id') @v.validate(None, v_c.check_cluster_delete) def clusters_delete(cluster_id): api.terminate_cluster(cluster_id) return u.render() # ClusterTemplate ops @rest.get('/cluster-templates') @acl.enforce("data-processing:cluster-templates:get_all") @v.check_exists(api.get_cluster_template, 'marker') @v.validate(None, v.validate_pagination_limit, v.validate_sorting_cluster_templates) def cluster_templates_list(): result = api.get_cluster_templates( **u.get_request_args().to_dict()) return u.render(res=result, name='cluster_templates') @rest.post('/cluster-templates') @acl.enforce("data-processing:cluster-templates:create") @v.validate(ct_schema.CLUSTER_TEMPLATE_SCHEMA, v_ct.check_cluster_template_create) def cluster_templates_create(data): return u.render(api.create_cluster_template(data).to_wrapped_dict()) @rest.get('/cluster-templates/') @acl.enforce("data-processing:cluster-templates:get") 
@v.check_exists(api.get_cluster_template, 'cluster_template_id') def cluster_templates_get(cluster_template_id): return u.to_wrapped_dict(api.get_cluster_template, cluster_template_id) @rest.put('/cluster-templates/') @acl.enforce("data-processing:cluster-templates:modify") @v.check_exists(api.get_cluster_template, 'cluster_template_id') @v.validate(ct_schema.CLUSTER_TEMPLATE_UPDATE_SCHEMA, v_ct.check_cluster_template_update) def cluster_templates_update(cluster_template_id, data): return u.to_wrapped_dict( api.update_cluster_template, cluster_template_id, data) @rest.delete('/cluster-templates/') @acl.enforce("data-processing:cluster-templates:delete") @v.check_exists(api.get_cluster_template, 'cluster_template_id') @v.validate(None, v_ct.check_cluster_template_usage) def cluster_templates_delete(cluster_template_id): api.terminate_cluster_template(cluster_template_id) return u.render() def _cluster_template_export_helper(template): template.pop('id') template.pop('updated_at') template.pop('created_at') template.pop('tenant_id') template.pop('is_default') template['default_image_id'] = '{default_image_id}' template['node_groups'] = '{node_groups}' @rest.get('/cluster-templates//export') @acl.enforce("data-processing:cluster-templates:get") @v.check_exists(api.get_cluster_template, 'cluster_template_id') def cluster_template_export(cluster_template_id): content = u.to_wrapped_dict_no_render( api.get_cluster_template, cluster_template_id) _cluster_template_export_helper(content['cluster_template']) res = u.render(content) res.headers.add('Content-Disposition', 'attachment', filename='cluster_template.json') return res # NodeGroupTemplate ops @rest.get('/node-group-templates') @acl.enforce("data-processing:node-group-templates:get_all") @v.check_exists(api.get_node_group_template, 'marker') @v.validate(None, v.validate_pagination_limit, v.validate_sorting_node_group_templates) def node_group_templates_list(): result = api.get_node_group_templates( **u.get_request_args().to_dict()) return u.render(res=result, name='node_group_templates') @rest.post('/node-group-templates') @acl.enforce("data-processing:node-group-templates:create") @v.validate(ngt_schema.NODE_GROUP_TEMPLATE_SCHEMA, v_ngt.check_node_group_template_create) def node_group_templates_create(data): return u.render(api.create_node_group_template(data).to_wrapped_dict()) @rest.get('/node-group-templates/') @acl.enforce("data-processing:node-group-templates:get") @v.check_exists(api.get_node_group_template, 'node_group_template_id') def node_group_templates_get(node_group_template_id): return u.to_wrapped_dict( api.get_node_group_template, node_group_template_id) @rest.put('/node-group-templates/') @acl.enforce("data-processing:node-group-templates:modify") @v.check_exists(api.get_node_group_template, 'node_group_template_id') @v.validate(ngt_schema.NODE_GROUP_TEMPLATE_UPDATE_SCHEMA, v_ngt.check_node_group_template_update) def node_group_templates_update(node_group_template_id, data): return u.to_wrapped_dict( api.update_node_group_template, node_group_template_id, data) @rest.delete('/node-group-templates/') @acl.enforce("data-processing:node-group-templates:delete") @v.check_exists(api.get_node_group_template, 'node_group_template_id') @v.validate(None, v_ngt.check_node_group_template_usage) def node_group_templates_delete(node_group_template_id): api.terminate_node_group_template(node_group_template_id) return u.render() def _node_group_template_export_helper(template): template.pop('id') template.pop('updated_at') 
template.pop('created_at') template.pop('tenant_id') template.pop('is_default') template['flavor_id'] = '{flavor_id}' template['security_groups'] = '{security_groups}' template['image_id'] = '{image_id}' template['floating_ip_pool'] = '{floating_ip_pool}' @rest.get('/node-group-templates//export') @acl.enforce("data-processing:node-group-templates:get") @v.check_exists(api.get_node_group_template, 'node_group_template_id') def node_group_template_export(node_group_template_id): content = u.to_wrapped_dict_no_render( api.export_node_group_template, node_group_template_id) _node_group_template_export_helper(content['node_group_template']) res = u.render(content) res.headers.add('Content-Disposition', 'attachment', filename='node_group_template.json') return res # Plugins ops @rest.get('/plugins') @acl.enforce("data-processing:plugins:get_all") def plugins_list(): return u.render(plugins=[p.dict for p in api.get_plugins()]) @rest.get('/plugins/') @acl.enforce("data-processing:plugins:get") @v.check_exists(api.get_plugin, plugin_name='plugin_name') def plugins_get(plugin_name): return u.render(api.get_plugin(plugin_name).wrapped_dict) @rest.get('/plugins//') @acl.enforce("data-processing:plugins:get_version") @v.check_exists(api.get_plugin, plugin_name='plugin_name', version='version') def plugins_get_version(plugin_name, version): return u.render(api.get_plugin(plugin_name, version).wrapped_dict) @rest.patch('/plugins/') @acl.enforce("data-processing:plugins:patch") @v.check_exists(api.get_plugin, plugin_name='plugin_name') @v.validate(v_p.plugin_update_validation_jsonschema(), v_p.check_plugin_update) def plugins_update(plugin_name, data): return u.render(api.update_plugin(plugin_name, data).wrapped_dict) @rest.post_file('/plugins///convert-config/') @acl.enforce("data-processing:plugins:convert_config") @v.check_exists(api.get_plugin, plugin_name='plugin_name', version='version') @v.validate(None, v_p.check_convert_to_template) def plugins_convert_to_cluster_template(plugin_name, version, name, data): # There is no plugins that supports converting to cluster template # The last plugin with support of that is no longer supported pass # Image Registry ops @rest.get('/images') @acl.enforce("data-processing:images:get_all") def images_list(): tags = u.get_request_args().getlist('tags') name = u.get_request_args().get('name', None) return u.render(images=[i.dict for i in api.get_images(name, tags)]) @rest.get('/images/') @acl.enforce("data-processing:images:get") @v.check_exists(api.get_image, id='image_id') def images_get(image_id): return u.render(api.get_registered_image(image_id=image_id).wrapped_dict) @rest.post('/images/') @acl.enforce("data-processing:images:register") @v.check_exists(api.get_image, id='image_id') @v.validate(v_images.image_register_schema, v_images.check_image_register) def images_set(image_id, data): return u.render(api.register_image(image_id, **data).wrapped_dict) @rest.delete('/images/') @acl.enforce("data-processing:images:unregister") @v.check_exists(api.get_image, id='image_id') def images_unset(image_id): api.unregister_image(image_id) return u.render() @rest.post('/images//tag') @acl.enforce("data-processing:images:add_tags") @v.check_exists(api.get_image, id='image_id') @v.validate(v_images.image_tags_schema, v_images.check_tags) def image_tags_add(image_id, data): return u.render(api.add_image_tags(image_id, **data).wrapped_dict) @rest.post('/images//untag') @acl.enforce("data-processing:images:remove_tags") @v.check_exists(api.get_image, id='image_id') 
@v.validate(v_images.image_tags_schema) def image_tags_delete(image_id, data): return u.render(api.remove_image_tags(image_id, **data).wrapped_dict) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/v11.py0000664000175000017500000002545200000000000016061 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.api import acl from sahara.service.api import v11 as api from sahara.service import validation as v from sahara.service.validations.edp import data_source as v_d_s from sahara.service.validations.edp import data_source_schema as v_d_s_schema from sahara.service.validations.edp import job as v_j from sahara.service.validations.edp import job_binary as v_j_b from sahara.service.validations.edp import job_binary_internal as v_j_b_i from sahara.service.validations.edp import job_binary_internal_schema as vjbi_s from sahara.service.validations.edp import job_binary_schema as v_j_b_schema from sahara.service.validations.edp import job_execution as v_j_e from sahara.service.validations.edp import job_execution_schema as v_j_e_schema from sahara.service.validations.edp import job_schema as v_j_schema import sahara.utils.api as u rest = u.Rest('v11', __name__) # Job execution ops @rest.get('/job-executions') @acl.enforce("data-processing:job-executions:get_all") @v.check_exists(api.get_job_execution, 'marker') @v.validate(None, v.validate_pagination_limit, v.validate_sorting_job_executions) def job_executions_list(): result = api.job_execution_list( **u.get_request_args().to_dict()) return u.render(res=result, name='job_executions') @rest.get('/job-executions/') @acl.enforce("data-processing:job-executions:get") @v.check_exists(api.get_job_execution, id='job_id') def job_executions(job_id): return u.to_wrapped_dict(api.get_job_execution, job_id) @rest.get('/job-executions//refresh-status') @acl.enforce("data-processing:job-executions:refresh_status") @v.check_exists(api.get_job_execution, id='job_id') def job_executions_status(job_id): return u.to_wrapped_dict(api.get_job_execution_status, job_id) @rest.get('/job-executions//cancel') @acl.enforce("data-processing:job-executions:cancel") @v.check_exists(api.get_job_execution, id='job_id') @v.validate(None, v_j_e.check_job_execution_cancel) def job_executions_cancel(job_id): return u.to_wrapped_dict(api.cancel_job_execution, job_id) @rest.patch('/job-executions/') @acl.enforce("data-processing:job-executions:modify") @v.check_exists(api.get_job_execution, id='job_id') @v.validate(v_j_e_schema.JOB_EXEC_UPDATE_SCHEMA, v_j_e.check_job_execution_update, v_j_e.check_job_status_update) def job_executions_update(job_id, data): return u.to_wrapped_dict(api.update_job_execution, job_id, data) @rest.delete('/job-executions/') @acl.enforce("data-processing:job-executions:delete") @v.check_exists(api.get_job_execution, id='job_id') @v.validate(None, v_j_e.check_job_execution_delete) def job_executions_delete(job_id): 
api.delete_job_execution(job_id) return u.render() # Data source ops @rest.get('/data-sources') @acl.enforce("data-processing:data-sources:get_all") @v.check_exists(api.get_data_source, 'marker') @v.validate(None, v.validate_pagination_limit, v.validate_sorting_data_sources) def data_sources_list(): result = api.get_data_sources(**u.get_request_args().to_dict()) return u.render(res=result, name='data_sources') @rest.post('/data-sources') @acl.enforce("data-processing:data-sources:register") @v.validate(v_d_s_schema.DATA_SOURCE_SCHEMA, v_d_s.check_data_source_create) def data_source_register(data): return u.render(api.register_data_source(data).to_wrapped_dict()) @rest.get('/data-sources/') @acl.enforce("data-processing:data-sources:get") @v.check_exists(api.get_data_source, 'data_source_id') def data_source_get(data_source_id): return u.to_wrapped_dict(api.get_data_source, data_source_id) @rest.delete('/data-sources/') @acl.enforce("data-processing:data-sources:delete") @v.check_exists(api.get_data_source, 'data_source_id') def data_source_delete(data_source_id): api.delete_data_source(data_source_id) return u.render() @rest.put('/data-sources/') @acl.enforce("data-processing:data-sources:modify") @v.check_exists(api.get_data_source, 'data_source_id') @v.validate( v_d_s_schema.DATA_SOURCE_UPDATE_SCHEMA, v_d_s.check_data_source_update) def data_source_update(data_source_id, data): return u.to_wrapped_dict(api.data_source_update, data_source_id, data) # Job ops @rest.get('/jobs') @acl.enforce("data-processing:jobs:get_all") @v.check_exists(api.get_job, 'marker') @v.validate(None, v.validate_pagination_limit, v.validate_sorting_jobs) def job_list(): result = api.get_jobs(**u.get_request_args().to_dict()) return u.render(res=result, name='jobs') @rest.post('/jobs') @acl.enforce("data-processing:jobs:create") @v.validate(v_j_schema.JOB_SCHEMA, v_j.check_mains_libs, v_j.check_interface) def job_create(data): return u.render(api.create_job(data).to_wrapped_dict()) @rest.get('/jobs/') @acl.enforce("data-processing:jobs:get") @v.check_exists(api.get_job, id='job_templates_id') def job_get(job_templates_id): return u.to_wrapped_dict(api.get_job, job_templates_id) @rest.patch('/jobs/') @acl.enforce("data-processing:jobs:modify") @v.check_exists(api.get_job, id='job_templates_id') @v.validate(v_j_schema.JOB_UPDATE_SCHEMA) def job_update(job_templates_id, data): return u.to_wrapped_dict(api.update_job, job_templates_id, data) @rest.delete('/jobs/') @acl.enforce("data-processing:jobs:delete") @v.check_exists(api.get_job, id='job_templates_id') def job_delete(job_templates_id): api.delete_job(job_templates_id) return u.render() @rest.post('/jobs//execute') @acl.enforce("data-processing:jobs:execute") @v.check_exists(api.get_job, id='job_templates_id') @v.validate(v_j_e_schema.JOB_EXEC_SCHEMA, v_j_e.check_job_execution) def job_execute(job_templates_id, data): return u.render(job_execution=api.execute_job( job_templates_id, data).to_dict()) @rest.get('/jobs/config-hints/') @acl.enforce("data-processing:jobs:get_config_hints") @v.check_exists(api.get_job_config_hints, job_type='job_type') def job_config_hints_get(job_type): return u.render(api.get_job_config_hints(job_type)) @rest.get('/job-types') @acl.enforce("data-processing:job-types:get_all") def job_types_get(): # We want to use flat=False with to_dict() so that # the value of each arg is given as a list. This supports # filters of the form ?type=Pig&type=Java, etc. 
return u.render(job_types=api.get_job_types( **u.get_request_args().to_dict(flat=False))) # Job binary ops @rest.post('/job-binaries') @acl.enforce("data-processing:job-binaries:create") @v.validate(v_j_b_schema.JOB_BINARY_SCHEMA, v_j_b.check_job_binary) def job_binary_create(data): return u.render(api.create_job_binary(data).to_wrapped_dict()) @rest.get('/job-binaries') @acl.enforce("data-processing:job-binaries:get_all") @v.check_exists(api.get_job_binaries, 'marker') @v.validate(None, v.validate_pagination_limit, v.validate_sorting_job_binaries) def job_binary_list(): result = api.get_job_binaries(**u.get_request_args().to_dict()) return u.render(res=result, name='binaries') @rest.get('/job-binaries/') @acl.enforce("data-processing:job-binaries:get") @v.check_exists(api.get_job_binary, 'job_binary_id') def job_binary_get(job_binary_id): return u.to_wrapped_dict(api.get_job_binary, job_binary_id) @rest.delete('/job-binaries/') @acl.enforce("data-processing:job-binaries:delete") @v.check_exists(api.get_job_binary, id='job_binary_id') def job_binary_delete(job_binary_id): api.delete_job_binary(job_binary_id) return u.render() @rest.get('/job-binaries//data') @acl.enforce("data-processing:job-binaries:get_data") @v.check_exists(api.get_job_binary, 'job_binary_id') def job_binary_data(job_binary_id): data = api.get_job_binary_data(job_binary_id) if type(data) == dict: data = u.render(data) return data @rest.put('/job-binaries/') @acl.enforce("data-processing:job-binaries:modify") @v.validate(v_j_b_schema.JOB_BINARY_UPDATE_SCHEMA, v_j_b.check_job_binary) def job_binary_update(job_binary_id, data): return u.render(api.update_job_binary(job_binary_id, data).to_wrapped_dict()) # Job binary internals ops @rest.put_file('/job-binary-internals/') @acl.enforce("data-processing:job-binary-internals:create") @v.validate(None, v_j_b_i.check_job_binary_internal) def job_binary_internal_create(**values): return u.render(api.create_job_binary_internal(values).to_wrapped_dict()) @rest.get('/job-binary-internals') @acl.enforce("data-processing:job-binary-internals:get_all") @v.check_exists(api.get_job_binary_internal, 'marker') @v.validate(None, v.validate_pagination_limit, v.validate_sorting_job_binary_internals) def job_binary_internal_list(): result = api.get_job_binary_internals(**u.get_request_args().to_dict()) return u.render(res=result, name='binaries') @rest.get('/job-binary-internals/') @acl.enforce("data-processing:job-binary-internals:get") @v.check_exists(api.get_job_binary_internal, 'job_binary_internal_id') def job_binary_internal_get(job_binary_internal_id): return u.to_wrapped_dict( api.get_job_binary_internal, job_binary_internal_id) @rest.delete('/job-binary-internals/') @acl.enforce("data-processing:job-binary-internals:delete") @v.check_exists(api.get_job_binary_internal, 'job_binary_internal_id') def job_binary_internal_delete(job_binary_internal_id): api.delete_job_binary_internal(job_binary_internal_id) return u.render() @rest.get('/job-binary-internals//data') @acl.enforce("data-processing:job-binary-internals:get_data") @v.check_exists(api.get_job_binary_internal, 'job_binary_internal_id') def job_binary_internal_data(job_binary_internal_id): return api.get_job_binary_internal_data(job_binary_internal_id) @rest.patch('/job-binary-internals/') @acl.enforce("data-processing:job-binaries:modify") @v.check_exists(api.get_job_binary_internal, 'job_binary_internal_id') @v.validate(vjbi_s.JOB_BINARY_UPDATE_SCHEMA) def job_binary_internal_update(job_binary_internal_id, data): return 
u.to_wrapped_dict( api.update_job_binary_internal, job_binary_internal_id, data) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.689891 sahara-16.0.0/sahara/api/v2/0000775000175000017500000000000000000000000015417 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/v2/__init__.py0000664000175000017500000000527100000000000017535 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ v2 API interface package This package contains the endpoint definitions for the version 2 API. The modules in this package are named in accordance with the top-level resource primitives they represent. This module provides a convenience function to register all the endpoint blueprints to the ``/v2`` root. When creating new endpoint modules, the following steps should be taken to ensure they are properly registered with the Flask application: * create the module file with a name that indicates its endpoint * add a sahara.utils.api.RestV2 blueprint object * add an import to this module (__init__.py) * add a registration line for the new endpoint to the register_blueprint function """ from sahara.api.v2 import cluster_templates from sahara.api.v2 import clusters from sahara.api.v2 import data_sources from sahara.api.v2 import images from sahara.api.v2 import job_binaries from sahara.api.v2 import job_templates from sahara.api.v2 import job_types from sahara.api.v2 import jobs from sahara.api.v2 import node_group_templates from sahara.api.v2 import plugins def register_blueprints(app, url_prefix): """Register the v2 endpoints with a Flask application This function will take a Flask application object and register all the v2 endpoints. Register blueprints here when adding new endpoint modules. :param app: A Flask application object to register blueprints on :param url_prefix: The url prefix for the blueprints """ app.register_blueprint(cluster_templates.rest, url_prefix=url_prefix) app.register_blueprint(clusters.rest, url_prefix=url_prefix) app.register_blueprint(data_sources.rest, url_prefix=url_prefix) app.register_blueprint(images.rest, url_prefix=url_prefix) app.register_blueprint(job_binaries.rest, url_prefix=url_prefix) app.register_blueprint(jobs.rest, url_prefix=url_prefix) app.register_blueprint(job_types.rest, url_prefix=url_prefix) app.register_blueprint(job_templates.rest, url_prefix=url_prefix) app.register_blueprint(node_group_templates.rest, url_prefix=url_prefix) app.register_blueprint(plugins.rest, url_prefix=url_prefix) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/v2/cluster_templates.py0000664000175000017500000001172400000000000021535 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.api import acl from sahara.service.api.v2 import cluster_templates as api from sahara.service import validation as v from sahara.service.validations import cluster_template_schema as ct_schema from sahara.service.validations import cluster_templates as v_ct import sahara.utils.api as u rest = u.RestV2('cluster-templates', __name__) @rest.get('/cluster-templates') @acl.enforce("data-processing:cluster-template:list") @v.check_exists(api.get_cluster_template, 'marker') @v.validate(None, v.validate_pagination_limit, v.validate_sorting_cluster_templates) @v.validate_request_params(['plugin_name', 'plugin_version', 'name']) def cluster_templates_list(): request_args = u.get_request_args().to_dict() if 'plugin_version' in request_args: request_args['hadoop_version'] = request_args['plugin_version'] del request_args['plugin_version'] result = api.get_cluster_templates(**request_args) for ct in result: u._replace_hadoop_version_plugin_version(ct) u._replace_tenant_id_project_id(ct) return u.render(res=result, name='cluster_templates') @rest.post('/cluster-templates') @acl.enforce("data-processing:cluster-template:create") @v.validate(ct_schema.CLUSTER_TEMPLATE_SCHEMA_V2, v_ct.check_cluster_template_create) @v.validate_request_params([]) def cluster_templates_create(data): # renaming hadoop_version -> plugin_version # this can be removed once APIv1 is deprecated data['hadoop_version'] = data['plugin_version'] del data['plugin_version'] result = api.create_cluster_template(data).to_wrapped_dict() u._replace_hadoop_version_plugin_version(result['cluster_template']) u._replace_tenant_id_project_id(result['cluster_template']) return u.render(result) @rest.get('/cluster-templates/') @acl.enforce("data-processing:cluster-template:get") @v.check_exists(api.get_cluster_template, 'cluster_template_id') @v.validate_request_params([]) def cluster_templates_get(cluster_template_id): result = u.to_wrapped_dict_no_render( api.get_cluster_template, cluster_template_id) u._replace_hadoop_version_plugin_version(result['cluster_template']) u._replace_tenant_id_project_id(result['cluster_template']) return u.render(result) @rest.patch('/cluster-templates/') @acl.enforce("data-processing:cluster-template:update") @v.check_exists(api.get_cluster_template, 'cluster_template_id') @v.validate(ct_schema.CLUSTER_TEMPLATE_UPDATE_SCHEMA_V2, v_ct.check_cluster_template_update) @v.validate_request_params([]) def cluster_templates_update(cluster_template_id, data): if data.get('plugin_version', None): data['hadoop_version'] = data['plugin_version'] del data['plugin_version'] result = u.to_wrapped_dict_no_render( api.update_cluster_template, cluster_template_id, data) u._replace_hadoop_version_plugin_version(result['cluster_template']) u._replace_tenant_id_project_id(result['cluster_template']) return u.render(result) @rest.delete('/cluster-templates/') @acl.enforce("data-processing:cluster-template:delete") @v.check_exists(api.get_cluster_template, 'cluster_template_id') 
@v.validate(None, v_ct.check_cluster_template_usage) @v.validate_request_params([]) def cluster_templates_delete(cluster_template_id): api.terminate_cluster_template(cluster_template_id) return u.render() def _cluster_template_export_helper(template): template.pop('id') template.pop('updated_at') template.pop('created_at') template.pop('project_id') template.pop('is_default') template['default_image_id'] = '{default_image_id}' template['node_groups'] = '{node_groups}' @rest.get('/cluster-templates//export') @acl.enforce("data-processing:cluster-template:get") @v.check_exists(api.get_cluster_template, 'cluster_template_id') @v.validate_request_params([]) def cluster_template_export(cluster_template_id): content = u.to_wrapped_dict_no_render( api.export_cluster_template, cluster_template_id) u._replace_hadoop_version_plugin_version(content['cluster_template']) u._replace_tenant_id_project_id(content['cluster_template']) _cluster_template_export_helper(content['cluster_template']) res = u.render(content) res.headers.add('Content-Disposition', 'attachment', filename='cluster_template.json') return res ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/v2/clusters.py0000664000175000017500000001231400000000000017636 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import six from sahara.api import acl from sahara.service.api.v2 import clusters as api from sahara.service import validation as v from sahara.service.validations import clusters as v_c from sahara.service.validations import clusters_scaling as v_c_s from sahara.service.validations import clusters_schema as v_c_schema import sahara.utils.api as u rest = u.RestV2('clusters', __name__) def _replace_tenant_id_project_id_provision_steps(c): if 'provision_progress' in c: for step in c['provision_progress']: dict.update(step, {'project_id': step['tenant_id']}) dict.pop(step, 'tenant_id') @rest.get('/clusters') @acl.enforce("data-processing:cluster:list") @v.check_exists(api.get_cluster, 'marker') @v.validate(None, v.validate_pagination_limit) @v.validate_request_params(['plugin_name', 'plugin_version', 'name']) def clusters_list(): request_args = u.get_request_args().to_dict() if 'plugin_version' in request_args: request_args['hadoop_version'] = request_args['plugin_version'] del request_args['plugin_version'] result = api.get_clusters(**request_args) for c in result: u._replace_hadoop_version_plugin_version(c) u._replace_tenant_id_project_id(c) _replace_tenant_id_project_id_provision_steps(c) return u.render(res=result, name='clusters') @rest.post('/clusters') @acl.enforce("data-processing:cluster:create") @v.validate(v_c_schema.CLUSTER_SCHEMA_V2, v_c.check_one_or_multiple_clusters_create) @v.validate_request_params([]) def clusters_create(data): # renaming hadoop_version -> plugin_version # this can be removed once APIv1 is deprecated data['hadoop_version'] = data['plugin_version'] del data['plugin_version'] if data.get('count', None) is not None: result = api.create_multiple_clusters(data) for c in result['clusters']: u._replace_hadoop_version_plugin_version(c['cluster']) u._replace_tenant_id_project_id(c['cluster']) return u.render(result) else: result = api.create_cluster(data).to_wrapped_dict() u._replace_hadoop_version_plugin_version(result['cluster']) u._replace_tenant_id_project_id(result['cluster']) return u.render(result) @rest.put('/clusters/') @acl.enforce("data-processing:cluster:scale") @v.check_exists(api.get_cluster, 'cluster_id') @v.validate(v_c_schema.CLUSTER_SCALING_SCHEMA_V2, v_c_s.check_cluster_scaling) @v.validate_request_params([]) def clusters_scale(cluster_id, data): result = u.to_wrapped_dict_no_render( api.scale_cluster, cluster_id, data) u._replace_hadoop_version_plugin_version(result['cluster']) u._replace_tenant_id_project_id(result['cluster']) _replace_tenant_id_project_id_provision_steps(result['cluster']) return u.render(result) @rest.get('/clusters/') @acl.enforce("data-processing:cluster:get") @v.check_exists(api.get_cluster, 'cluster_id') @v.validate_request_params(['show_progress']) def clusters_get(cluster_id): data = u.get_request_args() show_events = six.text_type( data.get('show_progress', 'false')).lower() == 'true' result = u.to_wrapped_dict_no_render( api.get_cluster, cluster_id, show_events) u._replace_hadoop_version_plugin_version(result['cluster']) u._replace_tenant_id_project_id(result['cluster']) _replace_tenant_id_project_id_provision_steps(result['cluster']) return u.render(result) @rest.patch('/clusters/') @acl.enforce("data-processing:cluster:update") @v.check_exists(api.get_cluster, 'cluster_id') @v.validate(v_c_schema.CLUSTER_UPDATE_SCHEMA_V2, v_c.check_cluster_update) @v.validate_request_params([]) def clusters_update(cluster_id, data): result = u.to_wrapped_dict_no_render( api.update_cluster, cluster_id, data) 
u._replace_hadoop_version_plugin_version(result['cluster']) u._replace_tenant_id_project_id(result['cluster']) _replace_tenant_id_project_id_provision_steps(result['cluster']) return u.render(result) @rest.delete('/clusters/') @acl.enforce("data-processing:cluster:delete") @v.check_exists(api.get_cluster, 'cluster_id') @v.validate(v_c_schema.CLUSTER_DELETE_SCHEMA_V2, v_c.check_cluster_delete) @v.validate_request_params([]) def clusters_delete(cluster_id): data = u.request_data() force = data.get('force', False) extra = api.get_cluster(cluster_id).get('extra', {}) stack_name = extra.get('heat_stack_name', None) if extra else None api.terminate_cluster(cluster_id, force=force) if force: return u.render({"stack_name": stack_name}, status=200) else: return u.render(res=None, status=204) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/v2/data_sources.py0000664000175000017500000000546400000000000020456 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.api import acl from sahara.service.api.v2 import data_sources as api from sahara.service import validation as v from sahara.service.validations.edp import data_source as v_d_s from sahara.service.validations.edp import data_source_schema as v_d_s_schema import sahara.utils.api as u rest = u.RestV2('data-sources', __name__) @rest.get('/data-sources') @acl.enforce("data-processing:data-source:list") @v.check_exists(api.get_data_source, 'marker') @v.validate(None, v.validate_pagination_limit, v.validate_sorting_data_sources) @v.validate_request_params(['type']) def data_sources_list(): result = api.get_data_sources(**u.get_request_args().to_dict()) for ds in result: u._replace_tenant_id_project_id(ds) return u.render(res=result, name='data_sources') @rest.post('/data-sources') @acl.enforce("data-processing:data-source:register") @v.validate(v_d_s_schema.DATA_SOURCE_SCHEMA, v_d_s.check_data_source_create) @v.validate_request_params([]) def data_source_register(data): result = api.register_data_source(data).to_wrapped_dict() u._replace_tenant_id_project_id(result['data_source']) return u.render(result) @rest.get('/data-sources/') @acl.enforce("data-processing:data-source:get") @v.check_exists(api.get_data_source, 'data_source_id') @v.validate_request_params([]) def data_source_get(data_source_id): result = api.get_data_source(data_source_id).to_wrapped_dict() u._replace_tenant_id_project_id(result['data_source']) return u.render(result) @rest.delete('/data-sources/') @acl.enforce("data-processing:data-source:delete") @v.check_exists(api.get_data_source, 'data_source_id') @v.validate_request_params([]) def data_source_delete(data_source_id): api.delete_data_source(data_source_id) return u.render() @rest.patch('/data-sources/') @acl.enforce("data-processing:data-source:update") @v.check_exists(api.get_data_source, 'data_source_id') @v.validate(v_d_s_schema.DATA_SOURCE_UPDATE_SCHEMA) 
@v.validate_request_params([]) def data_source_update(data_source_id, data): result = api.data_source_update(data_source_id, data).to_wrapped_dict() u._replace_tenant_id_project_id(result['data_source']) return u.render(result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/v2/images.py0000664000175000017500000000541500000000000017243 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.api import acl from sahara.service.api.v2 import images as api from sahara.service import validation as v from sahara.service.validations import images as v_images import sahara.utils.api as u rest = u.RestV2('images', __name__) @rest.get('/images') @acl.enforce("data-processing:image:list") @v.validate_request_params(['name', 'tags', 'username']) def images_list(): tags = u.get_request_args().getlist('tags') name = u.get_request_args().get('name', None) return u.render(images=[i.dict for i in api.get_images(name, tags)]) @rest.get('/images/') @acl.enforce("data-processing:image:get") @v.check_exists(api.get_image, id='image_id') @v.validate_request_params([]) def images_get(image_id): return u.render(api.get_registered_image(id=image_id).wrapped_dict) @rest.post('/images/') @acl.enforce("data-processing:image:register") @v.check_exists(api.get_image, id='image_id') @v.validate(v_images.image_register_schema, v_images.check_image_register) @v.validate_request_params([]) def images_set(image_id, data): return u.render(api.register_image(image_id, **data).wrapped_dict) @rest.delete('/images/') @acl.enforce("data-processing:image:unregister") @v.check_exists(api.get_image, id='image_id') @v.validate_request_params([]) def images_unset(image_id): api.unregister_image(image_id) return u.render() @rest.get('/images//tags') @acl.enforce("data-processing:image:get-tags") @v.check_exists(api.get_image, id='image_id') @v.validate_request_params([]) def image_tags_get(image_id): return u.render(api.get_image_tags(image_id)) @rest.put('/images//tags', status_code=200) @acl.enforce("data-processing:image:set-tags") @v.check_exists(api.get_image, id='image_id') @v.validate(v_images.image_tags_schema, v_images.check_tags) @v.validate_request_params([]) def image_tags_update(image_id, data): return u.render(api.set_image_tags(image_id, **data).wrapped_dict) @rest.delete('/images//tags') @acl.enforce("data-processing:image:remove-tags") @v.check_exists(api.get_image, id='image_id') @v.validate_request_params([]) def image_tags_delete(image_id): api.remove_image_tags(image_id) return u.render() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/v2/job_binaries.py0000664000175000017500000000607300000000000020425 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.api import acl from sahara.service.api.v2 import job_binaries as api from sahara.service import validation as v from sahara.service.validations.edp import job_binary as v_j_b from sahara.service.validations.edp import job_binary_schema as v_j_b_schema import sahara.utils.api as u rest = u.RestV2('job-binaries', __name__) @rest.post('/job-binaries') @acl.enforce("data-processing:job-binary:create") @v.validate(v_j_b_schema.JOB_BINARY_SCHEMA, v_j_b.check_job_binary) @v.validate_request_params([]) def job_binary_create(data): result = api.create_job_binary(data).to_wrapped_dict() u._replace_tenant_id_project_id(result['job_binary']) return u.render(result) @rest.get('/job-binaries') @acl.enforce("data-processing:job-binary:list") @v.check_exists(api.get_job_binary, 'marker') @v.validate(None, v.validate_pagination_limit, v.validate_sorting_job_binaries) @v.validate_request_params(['name']) def job_binary_list(): result = api.get_job_binaries(**u.get_request_args().to_dict()) for jb in result: u._replace_tenant_id_project_id(jb) return u.render(res=result, name='binaries') @rest.get('/job-binaries/') @acl.enforce("data-processing:job-binary:get") @v.check_exists(api.get_job_binary, 'job_binary_id') @v.validate_request_params([]) def job_binary_get(job_binary_id): result = api.get_job_binary(job_binary_id).to_wrapped_dict() u._replace_tenant_id_project_id(result['job_binary']) return u.render(result) @rest.delete('/job-binaries/') @acl.enforce("data-processing:job-binary:delete") @v.check_exists(api.get_job_binary, id='job_binary_id') @v.validate_request_params([]) def job_binary_delete(job_binary_id): api.delete_job_binary(job_binary_id) return u.render() @rest.get('/job-binaries//data') @acl.enforce("data-processing:job-binary:get-data") @v.check_exists(api.get_job_binary, 'job_binary_id') @v.validate_request_params([]) def job_binary_data(job_binary_id): data = api.get_job_binary_data(job_binary_id) if type(data) == dict: data = u.render(data) return data @rest.patch('/job-binaries/') @acl.enforce("data-processing:job-binary:update") @v.validate(v_j_b_schema.JOB_BINARY_UPDATE_SCHEMA, v_j_b.check_job_binary) @v.validate_request_params([]) def job_binary_update(job_binary_id, data): result = api.update_job_binary(job_binary_id, data).to_wrapped_dict() u._replace_tenant_id_project_id(result['job_binary']) return u.render(result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/v2/job_templates.py0000664000175000017500000000767100000000000020634 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.api import acl from sahara.service.api.v2 import job_templates as api from sahara.service import validation as v from sahara.service.validations.edp import job as v_j from sahara.service.validations.edp import job_schema as v_j_schema import sahara.utils.api as u rest = u.RestV2('job-templates', __name__) def _replace_tenant_id_project_id_job_binary(jb_list): for jb_obj in jb_list: dict.update(jb_obj, {'project_id': jb_obj['tenant_id']}) dict.pop(jb_obj, 'tenant_id') @rest.get('/job-templates') @acl.enforce("data-processing:job-template:list") @v.check_exists(api.get_job_template, 'marker') @v.validate(None, v.validate_pagination_limit, v.validate_sorting_jobs) @v.validate_request_params(['type', 'name']) def job_templates_list(): result = api.get_job_templates(**u.get_request_args().to_dict()) for jt in result: u._replace_tenant_id_project_id(jt) _replace_tenant_id_project_id_job_binary(jt['mains']) _replace_tenant_id_project_id_job_binary(jt['libs']) return u.render(res=result, name='job_templates') @rest.post('/job-templates') @acl.enforce("data-processing:job-template:create") @v.validate(v_j_schema.JOB_SCHEMA, v_j.check_mains_libs, v_j.check_interface) @v.validate_request_params([]) def job_templates_create(data): result = {'job_template': api.create_job_template(data).to_dict()} u._replace_tenant_id_project_id(result['job_template']) _replace_tenant_id_project_id_job_binary(result['job_template']['mains']) _replace_tenant_id_project_id_job_binary(result['job_template']['libs']) return u.render(result) @rest.get('/job-templates/') @acl.enforce("data-processing:job-template:get") @v.check_exists(api.get_job_template, id='job_templates_id') @v.validate_request_params([]) def job_templates_get(job_templates_id): result = {'job_template': api.get_job_template( job_templates_id).to_dict()} u._replace_tenant_id_project_id(result['job_template']) _replace_tenant_id_project_id_job_binary(result['job_template']['mains']) _replace_tenant_id_project_id_job_binary(result['job_template']['libs']) return u.render(result) @rest.patch('/job-templates/') @acl.enforce("data-processing:job-template:update") @v.check_exists(api.get_job_template, id='job_templates_id') @v.validate(v_j_schema.JOB_UPDATE_SCHEMA) @v.validate_request_params([]) def job_templates_update(job_templates_id, data): result = {'job_template': api.update_job_template( job_templates_id, data).to_dict()} u._replace_tenant_id_project_id(result['job_template']) _replace_tenant_id_project_id_job_binary(result['job_template']['mains']) _replace_tenant_id_project_id_job_binary(result['job_template']['libs']) return u.render(result) @rest.delete('/job-templates/') @acl.enforce("data-processing:job-template:delete") @v.check_exists(api.get_job_template, id='job_templates_id') @v.validate_request_params([]) def job_templates_delete(job_templates_id): api.delete_job_template(job_templates_id) return u.render() @rest.get('/job-templates/config-hints/') @acl.enforce("data-processing:job-template:get-config-hints") @v.check_exists(api.get_job_config_hints, job_type='job_type') @v.validate_request_params([]) def 
job_config_hints_get(job_type): return u.render(api.get_job_config_hints(job_type)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/v2/job_types.py0000664000175000017500000000255300000000000017774 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.api import acl from sahara.service.api.v2 import job_types as api from sahara.service import validation as v import sahara.utils.api as u rest = u.RestV2('job-types', __name__) @rest.get('/job-types') @acl.enforce("data-processing:job-type:list") @v.validate_request_params(['type', 'plugin_name', 'plugin_version']) def job_types_get(): # We want to use flat=False with to_dict() so that # the value of each arg is given as a list. This supports # filters of the form ?type=Pig&type=Java, etc. request_args = u.get_request_args().to_dict(flat=False) if 'plugin_version' in request_args: request_args['hadoop_version'] = request_args['plugin_version'] del request_args['plugin_version'] return u.render(job_types=api.get_job_types(**request_args)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/v2/jobs.py0000664000175000017500000000677400000000000016744 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
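# Editor's note -- illustrative sketch only, not part of the upstream module.
# The helpers below rewrite a few legacy field names before a job execution
# is rendered through APIv2.  Conceptually, a stored record shaped like
#
#     {"job_id": "<template uuid>",
#      "oozie_job_id": "<engine uuid>",
#      "tenant_id": "<project uuid>"}
#
# is returned to v2 clients as
#
#     {"job_template_id": "<template uuid>",
#      "engine_job_id": "<engine uuid>",
#      "project_id": "<project uuid>"}
#
# (the engine_job_id key is only added on job submission; plain GET and list
# responses simply drop oozie_job_id, as the code below shows).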
import six from sahara.api import acl from sahara.service.api.v2 import jobs as api from sahara.service import validation as v from sahara.service.validations.edp import job_execution as v_j_e from sahara.service.validations.edp import job_execution_schema as v_j_e_schema import sahara.utils.api as u rest = u.RestV2('jobs', __name__) def _replace_job_id_job_template_id(job_obj): dict.update(job_obj, {'job_template_id': job_obj['job_id']}) dict.pop(job_obj, 'job_id') @rest.get('/jobs') @acl.enforce("data-processing:job:list") @v.check_exists(api.get_job_execution, 'marker') @v.validate(None, v.validate_pagination_limit, v.validate_sorting_job_executions) @v.validate_request_params(['status']) def jobs_list(): result = api.job_execution_list(**u.get_request_args().to_dict()) # APIv2: renaming oozie_job_id -> engine_job_id # once APIv1 is deprecated this can be # removed for je in result: je.pop('oozie_job_id', force=True) u._replace_tenant_id_project_id(je) _replace_job_id_job_template_id(je) return u.render(res=result, name='jobs') @rest.post('/jobs') @acl.enforce("data-processing:job:execute") @v.validate(v_j_e_schema.JOB_EXEC_SCHEMA_V2, v_j_e.check_job_execution) @v.validate_request_params([]) def jobs_execute(data): result = {'job': api.execute_job(data)} dict.update(result['job'], {'engine_job_id': result['job']['oozie_job_id']}) dict.pop(result['job'], 'oozie_job_id') u._replace_tenant_id_project_id(result['job']) _replace_job_id_job_template_id(result['job']) return u.render(result) @rest.get('/jobs/') @acl.enforce("data-processing:job:get") @v.check_exists(api.get_job_execution, id='job_id') @v.validate_request_params([]) def jobs_get(job_id): data = u.get_request_args() refresh_status = six.text_type( data.get('refresh_status', 'false')).lower() == 'true' result = {'job': api.get_job_execution(job_id, refresh_status)} result['job'].pop('oozie_job_id', force=True) u._replace_tenant_id_project_id(result['job']) _replace_job_id_job_template_id(result['job']) return u.render(result) @rest.patch('/jobs/') @acl.enforce("data-processing:job:update") @v.check_exists(api.get_job_execution, id='job_id') @v.validate( v_j_e_schema.JOB_EXEC_UPDATE_SCHEMA, v_j_e.check_job_execution_update) @v.validate_request_params([]) def jobs_update(job_id, data): result = {'job': api.update_job_execution(job_id, data)} result['job'].pop('oozie_job_id', force=True) u._replace_tenant_id_project_id(result['job']) _replace_job_id_job_template_id(result['job']) return u.render(result) @rest.delete('/jobs/') @acl.enforce("data-processing:job:delete") @v.check_exists(api.get_job_execution, id='job_id') @v.validate(None, v_j_e.check_job_execution_delete) @v.validate_request_params([]) def jobs_delete(job_id): api.delete_job_execution(job_id) return u.render() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/v2/node_group_templates.py0000664000175000017500000001240500000000000022212 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. from sahara.api import acl from sahara.service.api.v2 import node_group_templates as api from sahara.service import validation as v from sahara.service.validations import node_group_template_schema as ngt_schema from sahara.service.validations import node_group_templates as v_ngt import sahara.utils.api as u rest = u.RestV2('node-group-templates', __name__) @rest.get('/node-group-templates') @acl.enforce("data-processing:node-group-template:list") @v.check_exists(api.get_node_group_template, 'marker') @v.validate(None, v.validate_pagination_limit, v.validate_sorting_node_group_templates) @v.validate_request_params(['plugin_name', 'plugin_version', 'name']) def node_group_templates_list(): request_args = u.get_request_args().to_dict() if 'plugin_version' in request_args: request_args['hadoop_version'] = request_args['plugin_version'] del request_args['plugin_version'] result = api.get_node_group_templates(**request_args) for ngt in result: u._replace_hadoop_version_plugin_version(ngt) u._replace_tenant_id_project_id(ngt) return u.render(res=result, name="node_group_templates") @rest.post('/node-group-templates') @acl.enforce("data-processing:node-group-template:create") @v.validate(ngt_schema.NODE_GROUP_TEMPLATE_SCHEMA_V2, v_ngt.check_node_group_template_create) @v.validate_request_params([]) def node_group_templates_create(data): # renaming hadoop_version -> plugin_version # this can be removed once APIv1 is deprecated data['hadoop_version'] = data['plugin_version'] del data['plugin_version'] result = api.create_node_group_template(data).to_wrapped_dict() u._replace_hadoop_version_plugin_version(result['node_group_template']) u._replace_tenant_id_project_id(result['node_group_template']) return u.render(result) @rest.get('/node-group-templates/') @acl.enforce("data-processing:node-group-template:get") @v.check_exists(api.get_node_group_template, 'node_group_template_id') @v.validate_request_params([]) def node_group_templates_get(node_group_template_id): result = u.to_wrapped_dict_no_render( api.get_node_group_template, node_group_template_id) u._replace_hadoop_version_plugin_version(result['node_group_template']) u._replace_tenant_id_project_id(result['node_group_template']) return u.render(result) @rest.patch('/node-group-templates/') @acl.enforce("data-processing:node-group-template:update") @v.check_exists(api.get_node_group_template, 'node_group_template_id') @v.validate(ngt_schema.NODE_GROUP_TEMPLATE_UPDATE_SCHEMA_V2, v_ngt.check_node_group_template_update) @v.validate_request_params([]) def node_group_templates_update(node_group_template_id, data): if data.get('plugin_version', None): data['hadoop_version'] = data['plugin_version'] del data['plugin_version'] result = u.to_wrapped_dict_no_render( api.update_node_group_template, node_group_template_id, data) u._replace_hadoop_version_plugin_version(result['node_group_template']) u._replace_tenant_id_project_id(result['node_group_template']) return u.render(result) @rest.delete('/node-group-templates/') @acl.enforce("data-processing:node-group-template:delete") @v.check_exists(api.get_node_group_template, 'node_group_template_id') @v.validate(None, v_ngt.check_node_group_template_usage) @v.validate_request_params([]) def node_group_templates_delete(node_group_template_id): api.terminate_node_group_template(node_group_template_id) return u.render() def _node_group_template_export_helper(template): template.pop('id') 
template.pop('updated_at') template.pop('created_at') template.pop('project_id') template.pop('is_default') template['flavor_id'] = '{flavor_id}' template['security_groups'] = '{security_groups}' template['image_id'] = '{image_id}' template['floating_ip_pool'] = '{floating_ip_pool}' @rest.get('/node-group-templates//export') @acl.enforce("data-processing:node-group-template:get") @v.check_exists(api.get_node_group_template, 'node_group_template_id') @v.validate_request_params([]) def node_group_template_export(node_group_template_id): content = u.to_wrapped_dict_no_render( api.export_node_group_template, node_group_template_id) u._replace_hadoop_version_plugin_version(content['node_group_template']) u._replace_tenant_id_project_id(content['node_group_template']) _node_group_template_export_helper(content['node_group_template']) res = u.render(content) res.headers.add('Content-Disposition', 'attachment', filename='node_group_template.json') return res ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/api/v2/plugins.py0000664000175000017500000000365300000000000017461 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.api import acl from sahara.service.api.v2 import plugins as api from sahara.service import validation as v from sahara.service.validations import plugins as v_p import sahara.utils.api as u rest = u.RestV2('plugins', __name__) @rest.get('/plugins') @acl.enforce("data-processing:plugin:list") @v.validate_request_params([]) def plugins_list(): return u.render(plugins=[p.dict for p in api.get_plugins()]) @rest.get('/plugins/') @acl.enforce("data-processing:plugin:get") @v.check_exists(api.get_plugin, plugin_name='plugin_name') @v.validate_request_params([]) def plugins_get(plugin_name): return u.render(api.get_plugin(plugin_name).wrapped_dict) @rest.get('/plugins//') @acl.enforce("data-processing:plugin:get-version") @v.check_exists(api.get_plugin, plugin_name='plugin_name', version='version') @v.validate_request_params([]) def plugins_get_version(plugin_name, version): return u.render(api.get_plugin(plugin_name, version).wrapped_dict) @rest.patch('/plugins/') @acl.enforce("data-processing:plugin:update") @v.check_exists(api.get_plugin, plugin_name='plugin_name') @v.validate(v_p.plugin_update_validation_jsonschema(), v_p.check_plugin_update) @v.validate_request_params([]) def plugins_update(plugin_name, data): return u.render(api.update_plugin(plugin_name, data).wrapped_dict) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.689891 sahara-16.0.0/sahara/cli/0000775000175000017500000000000000000000000015066 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/cli/__init__.py0000664000175000017500000000000000000000000017165 
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.693891 sahara-16.0.0/sahara/cli/image_pack/0000775000175000017500000000000000000000000017146 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/cli/image_pack/__init__.py0000664000175000017500000000000000000000000021245 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/cli/image_pack/api.py0000664000175000017500000001015600000000000020274 0ustar00zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara import conductor # noqa from sahara.plugins import base as plugins_base from sahara.utils import remote try: import guestfs except ImportError: raise Exception("The image packing API depends on the system package " "python-libguestfs (and libguestfs itself.) Please " "install these packages to proceed.") LOG = None CONF = None # This is broken out to support testability def set_logger(log): global LOG LOG = log # This is broken out to support testability def set_conf(conf): global CONF CONF = conf # This is a local exception class that is used to exit routines # in cases where error information has already been logged. # It is caught and suppressed everywhere it is used. class Handled(Exception): pass class Context(object): '''Create a pseudo Context object Since this tool does not use the REST interface, we do not have a request from which to build a Context. ''' def __init__(self, is_admin=False, tenant_id=None): self.is_admin = is_admin self.tenant_id = tenant_id class ImageRemote(remote.TerminalOnlyRemote): def __init__(self, image_path, root_drive): guest = guestfs.GuestFS(python_return_dict=True) guest.add_drive_opts(image_path, format="qcow2") guest.set_network(True) self.guest = guest self.root_drive = root_drive def __enter__(self): self.guest.launch() if not self.root_drive: self.root_drive = self.guest.inspect_os()[0] self.guest.mount(self.root_drive, '/') try: cmd = "echo Testing sudo without tty..." 
self.execute_command(cmd, run_as_root=True) except RuntimeError: cmd = "sed -i 's/requiretty/!requiretty/' /etc/sudoers" self.guest.execute_command(cmd) return self def __exit__(self, exc_type, exc_value, traceback): self.guest.sync() self.guest.umount_all() self.guest.close() def execute_command(self, cmd, run_as_root=False, get_stderr=False, raise_when_error=True, timeout=300): try: LOG.info("Issuing command: {cmd}".format(cmd=cmd)) stdout = self.guest.sh(cmd) LOG.info("Received response: {stdout}".format(stdout=stdout)) return 0, stdout except RuntimeError as ex: if raise_when_error: raise else: return 1, ex.message def get_os_distrib(self): return self.guest.inspect_get_distro(self.root_drive) def write_file_to(self, path, script, run_as_root): LOG.info("Writing script to : {path}".format(path=path)) stdout = self.guest.write(path, script) return 0, stdout def setup_plugins(): plugins_base.setup_plugins() def get_loaded_plugins(): return plugins_base.PLUGINS.plugins def get_plugin_arguments(plugin_name): """Gets plugin arguments, as a dict of version to argument list.""" plugin = plugins_base.PLUGINS.get_plugin(plugin_name) versions = plugin.get_versions() return {version: plugin.get_image_arguments(version) for version in versions} def pack_image(image_path, plugin_name, plugin_version, image_arguments, root_drive=None, test_only=False): with ImageRemote(image_path, root_drive) as image_remote: plugin = plugins_base.PLUGINS.get_plugin(plugin_name) plugin.pack_image(plugin_version, image_remote, test_only=test_only, image_arguments=image_arguments) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/cli/image_pack/cli.py0000664000175000017500000001036000000000000020267 0ustar00zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import sys from oslo_config import cfg from oslo_log import log import six from sahara.cli.image_pack import api from sahara.i18n import _ LOG = log.getLogger(__name__) CONF = cfg.CONF CONF.register_cli_opts([ cfg.StrOpt( 'image', required=True, help=_("The path to an image to modify. This image will be modified " "in-place: be sure to target a copy if you wish to maintain a " "clean master image.")), cfg.StrOpt( 'root-filesystem', dest='root_fs', required=False, help=_("The filesystem to mount as the root volume on the image. 
No " "value is required if only one filesystem is detected.")), cfg.BoolOpt( 'test-only', dest='test_only', default=False, help=_("If this flag is set, no changes will be made to the image; " "instead, the script will fail if discrepancies are found " "between the image and the intended state."))]) def unregister_extra_cli_opt(name): try: for cli in CONF._cli_opts: if cli['opt'].name == name: CONF.unregister_opt(cli['opt']) except Exception: pass for extra_opt in ["log-exchange", "host", "port"]: unregister_extra_cli_opt(extra_opt) def add_plugin_parsers(subparsers): api.setup_plugins() for plugin in api.get_loaded_plugins(): args_by_version = api.get_plugin_arguments(plugin) if all(args is NotImplemented for version, args in six.iteritems(args_by_version)): continue plugin_parser = subparsers.add_parser( plugin, help=_('Image generation for the {plugin} plugin').format( plugin=plugin)) version_parsers = plugin_parser.add_subparsers( title=_("Plugin version"), dest="version", help=_("Available versions")) for version, args in six.iteritems(args_by_version): if args is NotImplemented: continue version_parser = version_parsers.add_parser( version, help=_('{plugin} version {version}').format( plugin=plugin, version=version)) for arg in args: arg_token = ("--%s" % arg.name if len(arg.name) > 1 else "-%s" % arg.name) version_parser.add_argument(arg_token, dest=arg.name, help=arg.description, default=arg.default, required=arg.required, choices=arg.choices) version_parser.set_defaults(args={arg.name for arg in args}) command_opt = cfg.SubCommandOpt('plugin', title=_('Plugin'), help=_('Available plugins'), handler=add_plugin_parsers) CONF.register_cli_opt(command_opt) def main(): CONF(project='sahara') CONF.reload_config_files() log.setup(CONF, "sahara") LOG.info("Command: {command}".format(command=' '.join(sys.argv))) api.set_logger(LOG) api.set_conf(CONF) plugin = CONF.plugin.name version = CONF.plugin.version args = CONF.plugin.args image_arguments = {arg: getattr(CONF.plugin, arg) for arg in args} api.pack_image(CONF.image, plugin, version, image_arguments, CONF.root_fs, CONF.test_only) LOG.info("Finished packing image for {plugin} at version " "{version}".format(plugin=plugin, version=version)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/cli/sahara_all.py0000664000175000017500000000373100000000000017533 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.utils import patches patches.patch_all() import os import sys from oslo_log import log LOG = log.getLogger(__name__) # If ../sahara/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... 
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'sahara', '__init__.py')): sys.path.insert(0, possible_topdir) import sahara.main as server def main(): server.setup_common(possible_topdir, 'all-in-one') app = server.make_app() server.setup_sahara_api('all-in-one') server.setup_sahara_engine() server.setup_auth_policy() launcher = server.get_process_launcher() LOG.warning(""" __ __ _ \ \ / /_ _ _ __ _ __ (_)_ __ __ _ \ \ /\ / / _` | '__| '_ \| | '_ \ / _` | \ V V / (_| | | | | | | | | | | (_| | \_/\_/ \__,_|_| |_| |_|_|_| |_|\__, | |___/ Using the sahara-all entry point is now deprecated. Please use the sahara-api and sahara-engine entry points instead. """) server.launch_api_service( launcher, server.SaharaWSGIService("sahara-all", app)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/cli/sahara_api.py0000664000175000017500000000301100000000000017523 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys # If ../sahara/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'sahara', '__init__.py')): sys.path.insert(0, possible_topdir) import sahara.main as server def setup_api(): server.setup_common(possible_topdir, 'API') app = server.make_app() server.setup_sahara_api('distributed') server.setup_auth_policy() return app def main(): app = setup_api() launcher = server.get_process_launcher() api_service = server.SaharaWSGIService("sahara-api", app) server.launch_api_service(launcher, api_service) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/cli/sahara_engine.py0000664000175000017500000000306000000000000020223 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.utils import patches patches.patch_all() import os import sys # If ../sahara/__init__.py exists, add ../ to Python search path, so that # it will override what happens to be installed in /usr/(local/)lib/python... 
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), os.pardir, os.pardir)) if os.path.exists(os.path.join(possible_topdir, 'sahara', '__init__.py')): sys.path.insert(0, possible_topdir) import sahara.main as server from sahara.service import ops def main(): server.setup_common(possible_topdir, 'engine') server.setup_sahara_engine() server.setup_sahara_api('distributed') ops_server = ops.OpsServer() launcher = server.get_process_launcher() service = ops_server.get_service() launcher.launch_service(service) service.start() launcher.wait() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/cli/sahara_status.py0000664000175000017500000000237400000000000020310 0ustar00zuulzuul00000000000000# Copyright (c) 2018 NEC, Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_config import cfg from oslo_upgradecheck import common_checks from oslo_upgradecheck import upgradecheck from sahara.i18n import _ CONF = cfg.CONF class Checks(upgradecheck.UpgradeCommands): """Contains upgrade checks Various upgrade checks should be added as separate methods in this class and added to _upgrade_checks tuple. """ _upgrade_checks = ( (_("Policy File JSON to YAML Migration"), (common_checks.check_policy_json, {'conf': CONF})), ) def main(): return upgradecheck.main( CONF, project='sahara', upgrade_command=Checks()) if __name__ == '__main__': sys.exit(main()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/cli/sahara_subprocess.py0000664000175000017500000000420000000000000021143 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import _io import pickle # nosec import sys import traceback from oslo_utils import reflection def main(): # NOTE(dmitryme): since we do not read stderr in the main process, # we need to flush it somewhere, otherwise both processes might # hang because of i/o buffer overflow. with open('/dev/null', 'w') as sys.stderr: while True: result = dict() try: # TODO(elmiko) these pickle usages should be # reinvestigated to determine a more secure manner to # deploy remote commands. 
if isinstance(sys.stdin, _io.TextIOWrapper): func = pickle.load(sys.stdin.buffer) # nosec args = pickle.load(sys.stdin.buffer) # nosec kwargs = pickle.load(sys.stdin.buffer) # nosec else: func = pickle.load(sys.stdin) # nosec args = pickle.load(sys.stdin) # nosec kwargs = pickle.load(sys.stdin) # nosec result['output'] = func(*args, **kwargs) except BaseException as e: cls_name = reflection.get_class_name(e, fully_qualified=False) result['exception'] = cls_name + ': ' + str(e) result['traceback'] = traceback.format_exc() if isinstance(sys.stdin, _io.TextIOWrapper): pickle.dump(result, sys.stdout.buffer, protocol=2) # nosec else: pickle.dump(result, sys.stdout, protocol=2) # nosec sys.stdout.flush() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.693891 sahara-16.0.0/sahara/common/0000775000175000017500000000000000000000000015607 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/__init__.py0000664000175000017500000000000000000000000017706 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/config.py0000664000175000017500000000356600000000000017440 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Hewlett Packard Enterprise Development Corporation, LP # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_middleware import cors from oslo_policy import opts from sahara import config def set_config_defaults(): """This method updates all configuration default values.""" set_cors_middleware_defaults() # TODO(gmann): Remove setting the default value of config policy_file # once oslo_policy change the default value to 'policy.yaml'. # https://opendev.org/openstack/oslo.policy/src/commit/d8534850d9238e85ae0ea55bf2ac8583681fdb2b/oslo_policy/opts.py#L49 opts.set_defaults(config.CONF, 'policy.yaml') def set_cors_middleware_defaults(): """Update default configuration options for oslo.middleware.""" cors.set_defaults( allow_headers=['X-Auth-Token', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id', 'X-OpenStack-Request-ID'], expose_headers=['X-Auth-Token', 'X-Subject-Token', 'X-Service-Token', 'X-OpenStack-Request-ID'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'] ) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.697891 sahara-16.0.0/sahara/common/policies/0000775000175000017500000000000000000000000017416 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/__init__.py0000664000175000017500000000450700000000000021535 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from sahara.common.policies import base from sahara.common.policies import cluster from sahara.common.policies import cluster_template from sahara.common.policies import cluster_templates from sahara.common.policies import clusters from sahara.common.policies import data_source from sahara.common.policies import data_sources from sahara.common.policies import image from sahara.common.policies import images from sahara.common.policies import job from sahara.common.policies import job_binaries from sahara.common.policies import job_binary from sahara.common.policies import job_binary_internals from sahara.common.policies import job_executions from sahara.common.policies import job_template from sahara.common.policies import job_type from sahara.common.policies import job_types from sahara.common.policies import jobs from sahara.common.policies import node_group_template from sahara.common.policies import node_group_templates from sahara.common.policies import plugin from sahara.common.policies import plugins def list_rules(): return itertools.chain( base.list_rules(), clusters.list_rules(), cluster_templates.list_rules(), data_sources.list_rules(), images.list_rules(), job_binaries.list_rules(), job_binary_internals.list_rules(), job_executions.list_rules(), job_types.list_rules(), jobs.list_rules(), node_group_templates.list_rules(), plugins.list_rules(), cluster.list_rules(), cluster_template.list_rules(), data_source.list_rules(), image.list_rules(), job_binary.list_rules(), job_type.list_rules(), job.list_rules(), node_group_template.list_rules(), plugin.list_rules(), job_template.list_rules() ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/base.py0000664000175000017500000000417700000000000020713 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
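# Editor's note -- illustrative only.  The constants defined below are
# percent-style templates that the individual policy modules expand into
# fully qualified rule names, for example:
#
#     DATA_PROCESSING_CLUSTER % 'get'       -> "data-processing:cluster:get"
#     DATA_PROCESSING_CLUSTERS % 'get_all'  -> "data-processing:clusters:get_all"
#     DATA_PROCESSING_CLUSTER_TEMPLATE % 'update'
#                                           -> "data-processing:cluster-template:update"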
from oslo_policy import policy DATA_PROCESSING = 'data-processing:%s' DATA_PROCESSING_CLUSTERS = DATA_PROCESSING % 'clusters:%s' DATA_PROCESSING_CLUSTER_TEMPLATES = DATA_PROCESSING % 'cluster-templates:%s' DATA_PROCESSING_DATA_SOURCES = DATA_PROCESSING % 'data-sources:%s' DATA_PROCESSING_IMAGES = DATA_PROCESSING % 'images:%s' DATA_PROCESSING_JOB_BINARIES = DATA_PROCESSING % 'job-binaries:%s' DATA_PROCESSING_JOB_EXECUTIONS = DATA_PROCESSING % 'job-executions:%s' DATA_PROCESSING_JOB_TYPES = DATA_PROCESSING % 'job-types:%s' DATA_PROCESSING_JOBS = DATA_PROCESSING % 'jobs:%s' DATA_PROCESSING_PLUGINS = DATA_PROCESSING % 'plugins:%s' DATA_PROCESSING_NODE_GROUP_TEMPLATES = ( DATA_PROCESSING % 'node-group-templates:%s') DATA_PROCESSING_JOB_BINARY_INTERNALS = ( DATA_PROCESSING % 'job-binary-internals:%s') DATA_PROCESSING_CLUSTER = DATA_PROCESSING % 'cluster:%s' DATA_PROCESSING_CLUSTER_TEMPLATE = DATA_PROCESSING % 'cluster-template:%s' DATA_PROCESSING_DATA_SOURCE = DATA_PROCESSING % 'data-source:%s' DATA_PROCESSING_IMAGE = DATA_PROCESSING % 'image:%s' DATA_PROCESSING_JOB_BINARY = DATA_PROCESSING % 'job-binary:%s' DATA_PROCESSING_JOB_TEMPLATE = DATA_PROCESSING % 'job-template:%s' DATA_PROCESSING_JOB_TYPE = DATA_PROCESSING % 'job-type:%s' DATA_PROCESSING_JOB = DATA_PROCESSING % 'job:%s' DATA_PROCESSING_PLUGIN = DATA_PROCESSING % 'plugin:%s' DATA_PROCESSING_NODE_GROUP_TEMPLATE = ( DATA_PROCESSING % 'node-group-template:%s') UNPROTECTED = '' ROLE_ADMIN = 'role:admin' rules = [ policy.RuleDefault( name='context_is_admin', check_str=ROLE_ADMIN), ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/cluster.py0000664000175000017500000000432600000000000021456 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
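# Editor's note -- illustrative sketch, not part of the upstream module.  Each
# DocumentedRuleDefault below uses base.UNPROTECTED ('') as its default check
# string, i.e. the v2 cluster operations are not restricted beyond normal API
# authentication unless overridden.  An operator wanting to restrict, for
# example, cluster deletion to administrators could override the default in
# the deployment's policy file (path and contents below are hypothetical):
#
#     # /etc/sahara/policy.yaml
#     "data-processing:cluster:delete": "role:admin"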
from oslo_policy import policy from sahara.common.policies import base clusters_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTER % 'scale', check_str=base.UNPROTECTED, description='Scale cluster.', operations=[{'path': '/v2/clusters/{cluster_id}', 'method': 'PUT'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTER % 'list', check_str=base.UNPROTECTED, description='List available clusters', operations=[{'path': '/v2/clusters', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTER % 'create', check_str=base.UNPROTECTED, description='Create cluster.', operations=[{'path': '/v2/clusters', 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTER % 'get', check_str=base.UNPROTECTED, description='Show details of a cluster.', operations=[{'path': '/v2/clusters/{cluster_id}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTER % 'update', check_str=base.UNPROTECTED, description='Updates a cluster.', operations=[{'path': '/v2/clusters/{cluster_id}', 'method': 'PATCH'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTER % 'delete', check_str=base.UNPROTECTED, description='Delete a cluster.', operations=[{'path': '/v2/clusters/{cluster_id}', 'method': 'DELETE'}]), ] def list_rules(): return clusters_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/cluster_template.py0000664000175000017500000000417000000000000023346 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
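# Editor's note -- illustrative comparison, not part of the upstream module.
# This module defines the APIv2 cluster-template rules; the sibling
# cluster_templates.py module keeps the legacy v1.1 names.  The two sets map
# onto different rule names and HTTP routes, e.g.:
#
#     v2:    "data-processing:cluster-template:update"
#            PATCH /v2/cluster-templates/{cluster_temp_id}
#     v1.1:  "data-processing:cluster-templates:modify"
#            PUT   /v1.1/{project_id}/cluster-templates/{cluster_temp_id}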
from oslo_policy import policy from sahara.common.policies import base cluster_templates_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTER_TEMPLATE % 'create', check_str=base.UNPROTECTED, description='Create cluster template.', operations=[{'path': '/v2/cluster-templates', 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTER_TEMPLATE % 'delete', check_str=base.UNPROTECTED, description='Delete a cluster template.', operations=[ {'path': '/v2/cluster-templates/{cluster_temp_id}', 'method': 'DELETE'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTER_TEMPLATE % 'update', check_str=base.UNPROTECTED, description='Update cluster template.', operations=[ {'path': '/v2/cluster-templates/{cluster_temp_id}', 'method': 'PATCH'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTER_TEMPLATE % 'get', check_str=base.UNPROTECTED, description='Show cluster template details.', operations=[ {'path': '/v2/cluster-templates/{cluster_temp_id}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTER_TEMPLATE % 'list', check_str=base.UNPROTECTED, description='List cluster templates.', operations=[{'path': '/v2/cluster-templates', 'method': 'GET'}]), ] def list_rules(): return cluster_templates_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/cluster_templates.py0000664000175000017500000000431100000000000023526 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_policy import policy from sahara.common.policies import base cluster_templates_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTER_TEMPLATES % 'create', check_str=base.UNPROTECTED, description='Create cluster template.', operations=[{'path': '/v1.1/{project_id}/cluster-templates', 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTER_TEMPLATES % 'delete', check_str=base.UNPROTECTED, description='Delete a cluster template.', operations=[ {'path': '/v1.1/{project_id}/cluster-templates/{cluster_temp_id}', 'method': 'DELETE'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTER_TEMPLATES % 'modify', check_str=base.UNPROTECTED, description='Update cluster template.', operations=[ {'path': '/v1.1/{project_id}/cluster-templates/{cluster_temp_id}', 'method': 'PUT'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTER_TEMPLATES % 'get', check_str=base.UNPROTECTED, description='Show cluster template details.', operations=[ {'path': '/v1.1/{project_id}/cluster-templates/{cluster_temp_id}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTER_TEMPLATES % 'get_all', check_str=base.UNPROTECTED, description='List cluster templates.', operations=[{'path': '/v1.1/{project_id}/cluster-templates', 'method': 'GET'}]), ] def list_rules(): return cluster_templates_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/clusters.py0000664000175000017500000000447000000000000021641 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
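# Editor's note -- illustrative only.  These legacy (v1.1) cluster rules, like
# the rest of the defaults registered through
# sahara.common.policies.list_rules(), can be dumped into a sample policy file
# with the standard oslo.policy tooling, assuming the rules are registered
# under the "sahara" namespace in the project's setup.cfg:
#
#     oslopolicy-sample-generator --namespace sahara \
#         --output-file etc/sahara/policy.yaml.sample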
from oslo_policy import policy from sahara.common.policies import base clusters_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTERS % 'scale', check_str=base.UNPROTECTED, description='Scale cluster.', operations=[{'path': '/v1.1/{project_id}/clusters/{cluster_id}', 'method': 'PUT'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTERS % 'get_all', check_str=base.UNPROTECTED, description='List available clusters', operations=[{'path': '/v1.1/{project_id}/clusters', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTERS % 'create', check_str=base.UNPROTECTED, description='Create cluster.', operations=[{'path': '/v1.1/{project_id}/clusters', 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTERS % 'get', check_str=base.UNPROTECTED, description='Show details of a cluster.', operations=[{'path': '/v1.1/{project_id}/clusters/{cluster_id}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTERS % 'modify', check_str=base.UNPROTECTED, description='Modify a cluster.', operations=[{'path': '/v1.1/{project_id}/clusters/{cluster_id}', 'method': 'PATCH'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_CLUSTERS % 'delete', check_str=base.UNPROTECTED, description='Delete a cluster.', operations=[{'path': '/v1.1/{project_id}/clusters/{cluster_id}', 'method': 'DELETE'}]), ] def list_rules(): return clusters_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/data_source.py0000664000175000017500000000404000000000000022257 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_policy import policy from sahara.common.policies import base data_sources_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_DATA_SOURCE % 'list', check_str=base.UNPROTECTED, description='List data sources.', operations=[{'path': '/v2/data-sources', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_DATA_SOURCE % 'get', check_str=base.UNPROTECTED, description='Show data source details.', operations=[ {'path': '/v2/data-sources/{data_source_id}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_DATA_SOURCE % 'register', check_str=base.UNPROTECTED, description='Create data source.', operations=[{'path': '/v2/data-sources', 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_DATA_SOURCE % 'update', check_str=base.UNPROTECTED, description='Update data source.', operations=[ {'path': '/v2/data-sources/{data_source_id}', 'method': 'PATCH'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_DATA_SOURCE % 'delete', check_str=base.UNPROTECTED, description='Delete data source.', operations=[ {'path': '/v2/data-sources/{data_source_id}', 'method': 'DELETE'}]), ] def list_rules(): return data_sources_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/data_sources.py0000664000175000017500000000416100000000000022446 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_policy import policy from sahara.common.policies import base data_sources_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_DATA_SOURCES % 'get_all', check_str=base.UNPROTECTED, description='List data sources.', operations=[{'path': '/v1.1/{project_id}/data-sources', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_DATA_SOURCES % 'get', check_str=base.UNPROTECTED, description='Show data source details.', operations=[ {'path': '/v1.1/{project_id}/data-sources/{data_source_id}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_DATA_SOURCES % 'register', check_str=base.UNPROTECTED, description='Create data source.', operations=[{'path': '/v1.1/{project_id}/data-sources', 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_DATA_SOURCES % 'modify', check_str=base.UNPROTECTED, description='Update data source.', operations=[ {'path': '/v1.1/{project_id}/data-sources/{data_source_id}', 'method': 'PUT'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_DATA_SOURCES % 'delete', check_str=base.UNPROTECTED, description='Delete data source.', operations=[ {'path': '/v1.1/{project_id}/data-sources/{data_source_id}', 'method': 'DELETE'}]), ] def list_rules(): return data_sources_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/image.py0000664000175000017500000000470600000000000021061 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_policy import policy from sahara.common.policies import base images_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_IMAGE % 'set-tags', check_str=base.UNPROTECTED, description='Add tags to image.', operations=[{'path': '/v2/images/{image_id}/tags', 'method': 'PUT'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_IMAGE % 'register', check_str=base.UNPROTECTED, description='Register image.', operations=[{'path': '/v2/images/{image_id}', 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_IMAGE % 'list', check_str=base.UNPROTECTED, description='List images.', operations=[{'path': '/v2/images', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_IMAGE % 'unregister', check_str=base.UNPROTECTED, description='Unregister image.', operations=[{'path': '/v2/images/{image_id}', 'method': 'DELETE'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_IMAGE % 'get', check_str=base.UNPROTECTED, description='Show image details.', operations=[{'path': '/v2/images/{image_id}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_IMAGE % 'remove-tags', check_str=base.UNPROTECTED, description='Remove tags from image.', operations=[{'path': '/v2/images/{image_id}/tags', 'method': 'DELETE'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_IMAGE % 'get-tags', check_str=base.UNPROTECTED, description='List tags on an image.', operations=[{'path': '/v2/images/{image_id}/tags', 'method': 'GET'}]), ] def list_rules(): return images_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/images.py0000664000175000017500000000443000000000000021236 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_policy import policy from sahara.common.policies import base images_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_IMAGES % 'add_tags', check_str=base.UNPROTECTED, description='Add tags to image.', operations=[{'path': '/v1.1/{project_id}/images/{image_id}/tag', 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_IMAGES % 'register', check_str=base.UNPROTECTED, description='Register image.', operations=[{'path': '/v1.1/{project_id}/images/{image_id}', 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_IMAGES % 'get_all', check_str=base.UNPROTECTED, description='List images.', operations=[{'path': '/v1.1/{project_id}/images', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_IMAGES % 'unregister', check_str=base.UNPROTECTED, description='Unregister image.', operations=[{'path': '/v1.1/{project_id}/images/{image_id}', 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_IMAGES % 'get', check_str=base.UNPROTECTED, description='Show image details.', operations=[{'path': '/v1.1/{project_id}/images/{image_id}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_IMAGES % 'remove_tags', check_str=base.UNPROTECTED, description='Remove tags from image.', operations=[{'path': '/v1.1/{project_id}/images/{image_id}/untag', 'method': 'POST'}]), ] def list_rules(): return images_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/job.py0000664000175000017500000000340300000000000020542 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_policy import policy from sahara.common.policies import base job_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB % 'execute', check_str=base.UNPROTECTED, description='Run job.', operations=[{'path': '/v2/jobs', 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB % 'get', check_str=base.UNPROTECTED, description='Show jobs details.', operations=[{'path': '/v2/jobs/{job_id}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB % 'update', check_str=base.UNPROTECTED, description='Update job.', operations=[{'path': '/v2/jobs/{job_id}', 'method': 'PATCH'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB % 'list', check_str=base.UNPROTECTED, description='List jobs.', operations=[{'path': '/v2/jobs', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB % 'delete', check_str=base.UNPROTECTED, description='Delete job.', operations=[{'path': '/v2/jobs/{job_id}', 'method': 'DELETE'}]), ] def list_rules(): return job_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/job_binaries.py0000664000175000017500000000462200000000000022422 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_policy import policy from sahara.common.policies import base job_binaries_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_BINARIES % 'get_all', check_str=base.UNPROTECTED, description='List job binaries.', operations=[{'path': '/v1.1/{project_id}/job-binaries', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_BINARIES % 'create', check_str=base.UNPROTECTED, description='Create job binary.', operations=[{'path': '/v1.1/{project_id}/job-binaries', 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_BINARIES % 'get_data', check_str=base.UNPROTECTED, description='Show job binary data.', operations=[ {'path': '/v1.1/{project_id}/job-binaries/{job-binary_id}/data', 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_BINARIES % 'modify', check_str=base.UNPROTECTED, description='Update job binary.', operations=[ {'path': '/v1.1/{project_id}/job-binaries/{job-binary_id}', 'method': 'PUT'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_BINARIES % 'get', check_str=base.UNPROTECTED, description='Show job binary details.', operations=[{'path': '/v1.1/{project_id}/job-binaries/{job_binary_id}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_BINARIES % 'delete', check_str=base.UNPROTECTED, description='Delete job binary.', operations=[{'path': '/v1.1/{project_id}/job-binaries/{job_binary_id}', 'method': 'DELETE'}]), ] def list_rules(): return job_binaries_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/job_binary.py0000664000175000017500000000445300000000000022114 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_policy import policy from sahara.common.policies import base job_binaries_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_BINARY % 'list', check_str=base.UNPROTECTED, description='List job binaries.', operations=[{'path': '/v2/job-binaries', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_BINARY % 'create', check_str=base.UNPROTECTED, description='Create job binary.', operations=[{'path': '/v2/job-binaries', 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_BINARY % 'get-data', check_str=base.UNPROTECTED, description='Show job binary data.', operations=[ {'path': '/v2/job-binaries/{job_binary_id}/data', 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_BINARY % 'update', check_str=base.UNPROTECTED, description='Update job binary.', operations=[ {'path': '/v2/job-binaries/{job_binary_id}', 'method': 'PATCH'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_BINARY % 'get', check_str=base.UNPROTECTED, description='Show job binary details.', operations=[{'path': '/v2/job-binaries/{job_binary_id}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_BINARY % 'delete', check_str=base.UNPROTECTED, description='Delete job binary.', operations=[{'path': '/v2/job-binaries/{job_binary_id}', 'method': 'DELETE'}]), ] def list_rules(): return job_binaries_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/job_binary_internals.py0000664000175000017500000000512600000000000024171 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_policy import policy from sahara.common.policies import base job_binary_internals_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_BINARY_INTERNALS % 'get', check_str=base.UNPROTECTED, description='Show job binary internal details.', operations=[{ 'path': '/v1.1/{project_id}/job-binary-internals/{job_bin_int_id}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_BINARY_INTERNALS % 'get_all', check_str=base.UNPROTECTED, description='List job binary internals.', operations=[{'path': '/v1.1/{project_id}/job-binary-internals', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_BINARY_INTERNALS % 'create', check_str=base.UNPROTECTED, description='Create job binary internals.', operations=[{'path': '/v1.1/{project_id}/job-binary-internals/{name}', 'method': 'PUT'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_BINARY_INTERNALS % 'get_data', check_str=base.UNPROTECTED, description='Show job binary internal data.', operations=[{ 'path': '/v1.1/{project_id}/job-binary-internals/{job_bin_int_id}/data', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_BINARY_INTERNALS % 'modify', check_str=base.UNPROTECTED, description='Update job binary internal.', operations=[{ 'path': '/v1.1/{project_id}/job-binary-internals/{job_bin_int_id}', 'method': 'PATCH'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_BINARY_INTERNALS % 'delete', check_str=base.UNPROTECTED, description='Delete job binary internals.', operations=[{ 'path': '/v1.1/{project_id}/job-binary-internals/{job_bin_int_id}', 'method': 'DELETE'}]), ] def list_rules(): return job_binary_internals_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/job_executions.py0000664000175000017500000000477700000000000023027 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_policy import policy from sahara.common.policies import base job_executions_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_EXECUTIONS % 'get', check_str=base.UNPROTECTED, description='Show job executions details.', operations=[{'path': '/v1.1/{project_id}/job-executions/{job_exec_id}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_EXECUTIONS % 'modify', check_str=base.UNPROTECTED, description='Update job execution.', operations=[{'path': '/v1.1/{project_id}/job-executions/{job_exec_id}', 'method': 'PATCH'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_EXECUTIONS % 'get_all', check_str=base.UNPROTECTED, description='List job executions.', operations=[{'path': '/v1.1/{project_id}/job-executions', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_EXECUTIONS % 'refresh_status', check_str=base.UNPROTECTED, description='Refresh job execution status.', operations=[ {'path': '/v1.1/{project_id}/job-executions/{job_exec_id}/refresh-status', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_EXECUTIONS % 'cancel', check_str=base.UNPROTECTED, description='Cancel job execution.', operations=[{'path': '/v1.1/{project_id}/job-executions/{job_exec_id}/cancel', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_EXECUTIONS % 'delete', check_str=base.UNPROTECTED, description='Delete job execution.', operations=[{'path': '/v1.1/{project_id}/job-executions/{job_exec_id}', 'method': 'DELETE'}]), ] def list_rules(): return job_executions_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/job_template.py0000664000175000017500000000452100000000000022437 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_policy import policy from sahara.common.policies import base job_templates_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_TEMPLATE % 'get', check_str=base.UNPROTECTED, description='Show job template details.', operations=[{'path': '/v2/job-templates/{job_temp_id}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_TEMPLATE % 'create', check_str=base.UNPROTECTED, description='Create job templates.', operations=[{'path': '/v2/job-templates', 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_TEMPLATE % 'list', check_str=base.UNPROTECTED, description='List job templates.', operations=[{'path': '/v2/job-templates', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_TEMPLATE % 'update', check_str=base.UNPROTECTED, description='Update job template.', operations=[{'path': '/v2/job-templates/{job_temp_id}', 'method': 'PATCH'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_TEMPLATE % 'get-config-hints', check_str=base.UNPROTECTED, description='Get job template config hints.', operations=[ {'path': '/v2/job-templates/config-hints/{job_type}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_TEMPLATE % 'delete', check_str=base.UNPROTECTED, description='Remove job template.', operations=[{'path': '/v2/job-templates/{job_temp_id}', 'method': 'DELETE'}]), ] def list_rules(): return job_templates_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/job_type.py0000664000175000017500000000166000000000000021606 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_policy import policy from sahara.common.policies import base job_types_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_TYPE % 'list', check_str=base.UNPROTECTED, description='List job types.', operations=[{'path': '/v2/job-types', 'method': 'GET'}]), ] def list_rules(): return job_types_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/job_types.py0000664000175000017500000000170300000000000021767 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_policy import policy from sahara.common.policies import base job_types_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOB_TYPES % 'get_all', check_str=base.UNPROTECTED, description='List job types.', operations=[{'path': '/v1.1/{project_id}/job-types', 'method': 'GET'}]), ] def list_rules(): return job_types_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/jobs.py0000664000175000017500000000473600000000000020737 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_policy import policy from sahara.common.policies import base jobs_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOBS % 'execute', check_str=base.UNPROTECTED, description='Run job.', operations=[{'path': '/v1.1/{project_id}/jobs/{job_id}/execute', 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOBS % 'get', check_str=base.UNPROTECTED, description='Show job details.', operations=[{'path': '/v1.1/{project_id}/jobs/{job_id}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOBS % 'create', check_str=base.UNPROTECTED, description='Create job.', operations=[{'path': '/v1.1/{project_id}/jobs', 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOBS % 'get_all', check_str=base.UNPROTECTED, description='List jobs.', operations=[{'path': '/v1.1/{project_id}/jobs', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOBS % 'modify', check_str=base.UNPROTECTED, description='Update job object.', operations=[{'path': '/v1.1/{project_id}/jobs/{job_id}', 'method': 'PATCH'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOBS % 'get_config_hints', check_str=base.UNPROTECTED, description='Get job config hints.', operations=[ {'path': '/v1.1/{project_id}/jobs/get_config_hints/{job_type}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_JOBS % 'delete', check_str=base.UNPROTECTED, description='Remove job.', operations=[{'path': '/v1.1/{project_id}/jobs/{job_id}', 'method': 'DELETE'}]), ] def list_rules(): return jobs_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/node_group_template.py0000664000175000017500000000426200000000000024030 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_policy import policy from sahara.common.policies import base node_group_templates_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_NODE_GROUP_TEMPLATE % 'list', check_str=base.UNPROTECTED, description='List node group templates.', operations=[{'path': '/v2/node-group-templates', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_NODE_GROUP_TEMPLATE % 'create', check_str=base.UNPROTECTED, description='Create node group template.', operations=[{'path': '/v2/node-group-templates', 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_NODE_GROUP_TEMPLATE % 'get', check_str=base.UNPROTECTED, description='Show node group template details.', operations=[ {'path': '/v2/node-group-templates/{node_group_temp_id}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_NODE_GROUP_TEMPLATE % 'update', check_str=base.UNPROTECTED, description='Update node group template.', operations=[ {'path': '/v2/node-group-templates/{node_group_temp_id}', 'method': 'PATCH'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_NODE_GROUP_TEMPLATE % 'delete', check_str=base.UNPROTECTED, description='Delete node group template.', operations=[ {'path': '/v2/node-group-templates/{node_group_temp_id}', 'method': 'DELETE'}]), ] def list_rules(): return node_group_templates_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/node_group_templates.py0000664000175000017500000000445200000000000024214 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_policy import policy from sahara.common.policies import base node_group_templates_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_NODE_GROUP_TEMPLATES % 'get_all', check_str=base.UNPROTECTED, description='List node group templates.', operations=[{'path': '/v1.1/{project_id}/node-group-templates', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_NODE_GROUP_TEMPLATES % 'create', check_str=base.UNPROTECTED, description='Create node group template.', operations=[{'path': '/v1.1/{project_id}/node-group-templates', 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_NODE_GROUP_TEMPLATES % 'get', check_str=base.UNPROTECTED, description='Show node group template details.', operations=[ {'path': '/v1.1/{project_id}/node-group-templates/{node_group_temp_id}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_NODE_GROUP_TEMPLATES % 'modify', check_str=base.UNPROTECTED, description='Update node group template.', operations=[ {'path': '/v1.1/{project_id}/node-group-templates/{node_group_temp_id}', 'method': 'PUT'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_NODE_GROUP_TEMPLATES % 'delete', check_str=base.UNPROTECTED, description='Delete node group template.', operations=[ {'path': '/v1.1/{project_id}/node-group-templates/{node_group_temp_id}', 'method': 'DELETE'}]), ] def list_rules(): return node_group_templates_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/plugin.py0000664000175000017500000000334100000000000021267 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
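# --- Illustrative sketch, not part of the sahara source tree ---------------
# Most rules above use base.UNPROTECTED, while the plugin policies that
# follow guard their update/patch actions with base.ROLE_ADMIN.  The
# self-contained snippet below shows how a 'role:admin' check string behaves
# compared with an unprotected rule; the rule name and credentials are
# made-up placeholders.
from oslo_config import cfg
from oslo_policy import policy as _policy

_conf = cfg.ConfigOpts()
_conf([], project='policy-example')

_enforcer = _policy.Enforcer(_conf)
_enforcer.register_defaults([
    _policy.RuleDefault(name='example:admin-only', check_str='role:admin'),
])

_member = {'user_id': 'demo', 'project_id': 'demo', 'roles': ['member']}
_admin = {'user_id': 'ops', 'project_id': 'demo', 'roles': ['admin']}
print(_enforcer.enforce('example:admin-only', {}, _member))  # False
print(_enforcer.enforce('example:admin-only', {}, _admin))   # True
# ---------------------------------------------------------------------------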
from oslo_policy import policy from sahara.common.policies import base plugins_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_PLUGIN % 'list', check_str=base.UNPROTECTED, description='List plugins.', operations=[{'path': '/v2/plugins', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_PLUGIN % 'get-version', check_str=base.UNPROTECTED, description='Show plugins version details.', operations=[ {'path': '/v2/plugins/{plugin_name}/{version}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_PLUGIN % 'get', check_str=base.UNPROTECTED, description='Show plugin details.', operations=[{'path': '/v2/plugins/{plugin_name}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_PLUGIN % 'update', check_str=base.ROLE_ADMIN, description='Update plugin details.', operations=[{'path': '/v2/plugins/{plugin_name}', 'method': 'PATCH'}]), ] def list_rules(): return plugins_policies ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/common/policies/plugins.py0000664000175000017500000000422500000000000021454 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_policy import policy from sahara.common.policies import base plugins_policies = [ policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_PLUGINS % 'get_all', check_str=base.UNPROTECTED, description='List plugins.', operations=[{'path': '/v1.1/{project_id}/plugins', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_PLUGINS % 'get_version', check_str=base.UNPROTECTED, description='Show plugins version details.', operations=[ {'path': '/v1.1/{project_id}/plugins/{plugin_name}/{version}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_PLUGINS % 'get', check_str=base.UNPROTECTED, description='Show plugin details.', operations=[{'path': '/v1.1/{project_id}/plugins/{plugin_name}', 'method': 'GET'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_PLUGINS % 'convert_config', check_str=base.UNPROTECTED, description='Convert plugins to cluster template', operations=[ {'path': ('/v1.1/{project_id}/plugins/{plugin_name}/' '{version}/convert-config/{name}'), 'method': 'POST'}]), policy.DocumentedRuleDefault( name=base.DATA_PROCESSING_PLUGINS % 'patch', check_str=base.ROLE_ADMIN, description='Update plugin details.', operations=[{'path': '/v1.1/{project_id}/plugins/{plugin_name}', 'method': 'PATCH'}]), ] def list_rules(): return plugins_policies ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.697891 sahara-16.0.0/sahara/conductor/0000775000175000017500000000000000000000000016317 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/conductor/__init__.py0000664000175000017500000000223600000000000020433 0ustar00zuulzuul00000000000000# Copyright (c) 2013 
Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from sahara.conductor import api as conductor_api from sahara.i18n import _ def Api(use_local=True, **kwargs): """Creates local or remote conductor Api. Creation of local or remote conductor Api depends on passed arg 'use_local' and config option 'use_local' in 'conductor' group. """ if cfg.CONF.conductor.use_local or use_local: api = conductor_api.LocalApi else: raise NotImplementedError( _("Remote conductor isn't implemented yet.")) # api = conductor.RemoteApi return api(**kwargs) API = Api() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/conductor/api.py0000664000175000017500000006037500000000000017455 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Handles all requests to the conductor service.""" from oslo_config import cfg from sahara.conductor import manager from sahara.conductor import resource as r conductor_opts = [ cfg.BoolOpt('use_local', default=True, help='Perform sahara-conductor operations locally.'), ] conductor_group = cfg.OptGroup(name='conductor', title='Conductor Options') CONF = cfg.CONF CONF.register_group(conductor_group) CONF.register_opts(conductor_opts, conductor_group) def _get_id(obj): """Return object id. Allows usage of both an object or an object's ID as a parameter when dealing with relationships. """ try: return obj.id except AttributeError: return obj class LocalApi(object): """A local version of the conductor API. It does database updates locally instead of via RPC. """ def __init__(self): self._manager = manager.ConductorManager() # Cluster ops @r.wrap(r.ClusterResource) def cluster_get(self, context, cluster, show_progress=False): """Return the cluster or None if it does not exist.""" return self._manager.cluster_get( context, _get_id(cluster), show_progress) @r.wrap(r.ClusterResource) def cluster_get_all(self, context, regex_search=False, **kwargs): """Get all clusters filtered by **kwargs. :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. 
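# --- Illustrative sketch, not part of the sahara source tree ---------------
# The module above exposes a ready-to-use conductor API object as
# sahara.conductor.API.  Other sahara services go through it instead of
# talking to the database layer directly, roughly as sketched below.  This
# assumes a configured sahara deployment with a request context already set
# for the current thread; the cluster id is a made-up placeholder.
from sahara import conductor
from sahara import context

ctx = context.ctx()  # current request context, set up by the calling service
cluster = conductor.API.cluster_get(ctx, '11111111-2222-3333-4444-555555555555')
if cluster is not None:
    print(cluster.name, cluster.status)
# ---------------------------------------------------------------------------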
:param kwargs: Specifies values for named fields by which to constrain the search """ return self._manager.cluster_get_all(context, regex_search, **kwargs) @r.wrap(r.ClusterResource) def cluster_create(self, context, values): """Create a cluster from the values dictionary. :returns: the created cluster. """ return self._manager.cluster_create(context, values) @r.wrap(r.ClusterResource) def cluster_update(self, context, cluster, values): """Update the cluster with the given values dictionary. :returns: the updated cluster. """ return self._manager.cluster_update(context, _get_id(cluster), values) def cluster_destroy(self, context, cluster): """Destroy the cluster or raise if it does not exist. :returns: None. """ self._manager.cluster_destroy(context, _get_id(cluster)) # Node Group ops def node_group_add(self, context, cluster, values): """Create a node group from the values dictionary. :returns: ID of the created node group. """ return self._manager.node_group_add(context, _get_id(cluster), values) def node_group_update(self, context, node_group, values): """Update the node group with the given values dictionary. :returns: None. """ self._manager.node_group_update(context, _get_id(node_group), values) def node_group_remove(self, context, node_group): """Destroy the node group or raise if it does not exist. :returns: None. """ self._manager.node_group_remove(context, _get_id(node_group)) # Instance ops def instance_add(self, context, node_group, values): """Create an instance from the values dictionary. :returns: ID of the created instance. """ return self._manager.instance_add(context, _get_id(node_group), values) def instance_update(self, context, instance, values): """Update the instance with the given values dictionary. :returns: None. """ self._manager.instance_update(context, _get_id(instance), values) def instance_remove(self, context, instance): """Destroy the instance or raise if it does not exist. :returns: None. """ self._manager.instance_remove(context, _get_id(instance)) # Volumes ops def append_volume(self, context, instance, volume_id): """Append volume_id to instance.""" self._manager.append_volume(context, _get_id(instance), volume_id) def remove_volume(self, context, instance, volume_id): """Remove volume_id in instance.""" self._manager.remove_volume(context, _get_id(instance), volume_id) # Cluster Template ops @r.wrap(r.ClusterTemplateResource) def cluster_template_get(self, context, cluster_template): """Return the cluster template or None if it does not exist.""" return self._manager.cluster_template_get(context, _get_id(cluster_template)) @r.wrap(r.ClusterTemplateResource) def cluster_template_get_all(self, context, regex_search=False, **kwargs): """Get all cluster templates filtered by **kwargs. :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. :param kwargs: Specifies values for named fields by which to constrain the search """ return self._manager.cluster_template_get_all(context, regex_search, **kwargs) @r.wrap(r.ClusterTemplateResource) def cluster_template_create(self, context, values): """Create a cluster template from the values dictionary. 
:returns: the created cluster template """ return self._manager.cluster_template_create(context, values) def cluster_template_destroy(self, context, cluster_template, ignore_prot_on_def=False): """Destroy the cluster template or raise if it does not exist. :returns: None """ self._manager.cluster_template_destroy(context, _get_id(cluster_template), ignore_prot_on_def) @r.wrap(r.ClusterTemplateResource) def cluster_template_update(self, context, id, cluster_template, ignore_prot_on_def=False): """Update the cluster template or raise if it does not exist. :returns: the updated cluster template """ return self._manager.cluster_template_update(context, id, cluster_template, ignore_prot_on_def) # Node Group Template ops @r.wrap(r.NodeGroupTemplateResource) def node_group_template_get(self, context, node_group_template): """Return the node group template or None if it does not exist.""" return self._manager.node_group_template_get( context, _get_id(node_group_template)) @r.wrap(r.NodeGroupTemplateResource) def node_group_template_get_all(self, context, regex_search=False, **kwargs): """Get all node group templates filtered by **kwargs. :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. :param kwargs: Specifies values for named fields by which to constrain the search """ return self._manager.node_group_template_get_all( context, regex_search, **kwargs) @r.wrap(r.NodeGroupTemplateResource) def node_group_template_create(self, context, values): """Create a node group template from the values dictionary. :returns: the created node group template """ return self._manager.node_group_template_create(context, values) def node_group_template_destroy(self, context, node_group_template, ignore_prot_on_def=False): """Destroy the node group template or raise if it does not exist. :returns: None """ self._manager.node_group_template_destroy(context, _get_id(node_group_template), ignore_prot_on_def) @r.wrap(r.NodeGroupTemplateResource) def node_group_template_update(self, context, id, values, ignore_prot_on_def=False): """Update a node group template from the values dictionary. :returns: the updated node group template """ return self._manager.node_group_template_update(context, id, values, ignore_prot_on_def) # Data Source ops @r.wrap(r.DataSource) def data_source_get(self, context, data_source): """Return the Data Source or None if it does not exist.""" return self._manager.data_source_get(context, _get_id(data_source)) @r.wrap(r.DataSource) def data_source_get_all(self, context, regex_search=False, **kwargs): """Get all Data Sources filtered by **kwargs. :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. :param kwargs: Specifies values for named fields by which to constrain the search """ return self._manager.data_source_get_all(context, regex_search, **kwargs) def data_source_count(self, context, **kwargs): """Count Data Sources filtered by **kwargs. 
Uses sqlalchemy "in_" clause for any tuple values Uses sqlalchemy "like" clause for any string values containing % """ return self._manager.data_source_count(context, **kwargs) @r.wrap(r.DataSource) def data_source_create(self, context, values): """Create a Data Source from the values dictionary.""" return self._manager.data_source_create(context, values) def data_source_destroy(self, context, data_source): """Destroy the Data Source or raise if it does not exist.""" self._manager.data_source_destroy(context, _get_id(data_source)) @r.wrap(r.DataSource) def data_source_update(self, context, id, values): """Update an existing Data Source""" return self._manager.data_source_update(context, id, values) # JobExecution ops @r.wrap(r.JobExecution) def job_execution_get(self, context, job_execution): """Return the JobExecution or None if it does not exist.""" return self._manager.job_execution_get(context, _get_id(job_execution)) @r.wrap(r.JobExecution) def job_execution_get_all(self, context, regex_search=False, **kwargs): """Get all JobExecutions filtered by **kwargs. kwargs key values may be the names of fields in a JobExecution plus the following special values with the indicated meaning: 'cluster.name' -- name of the Cluster referenced by the JobExecution 'job.name' -- name of the Job referenced by the JobExecution 'status' -- JobExecution['info']['status'] :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. :param kwargs: Specifies values for named fields by which to constrain the search """ return self._manager.job_execution_get_all(context, regex_search, **kwargs) def job_execution_count(self, context, **kwargs): """Count number of JobExecutions filtered by **kwargs. e.g. job_execution_count(cluster_id=12, input_id=123) """ return self._manager.job_execution_count(context, **kwargs) @r.wrap(r.JobExecution) def job_execution_create(self, context, values): """Create a JobExecution from the values dictionary.""" return self._manager.job_execution_create(context, values) @r.wrap(r.JobExecution) def job_execution_update(self, context, job_execution, values): """Update the JobExecution or raise if it does not exist.""" return self._manager.job_execution_update(context, _get_id(job_execution), values) def job_execution_destroy(self, context, job_execution): """Destroy the JobExecution or raise if it does not exist.""" self._manager.job_execution_destroy(context, _get_id(job_execution)) # Job ops @r.wrap(r.Job) def job_get(self, context, job): """Return the Job or None if it does not exist.""" return self._manager.job_get(context, _get_id(job)) @r.wrap(r.Job) def job_get_all(self, context, regex_search=False, **kwargs): """Get all Jobs filtered by **kwargs. :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. 
:param kwargs: Specifies values for named fields by which to constrain the search """ return self._manager.job_get_all(context, regex_search, **kwargs) @r.wrap(r.Job) def job_create(self, context, values): """Create a Job from the values dictionary.""" return self._manager.job_create(context, values) @r.wrap(r.Job) def job_update(self, context, job, values): """Update the Job or raise if it does not exist.""" return self._manager.job_update(context, _get_id(job), values) def job_destroy(self, context, job): """Destroy the Job or raise if it does not exist.""" self._manager.job_destroy(context, _get_id(job)) def job_main_name(self, context, job): """Return the name of the first main JobBinary or None. At present the 'mains' element is expected to contain a single element. In the future if 'mains' contains more than one element we will need a scheme or convention for retrieving a name from the list of binaries. :param job: This is expected to be a Job object """ if job.mains: binary = self.job_binary_get(context, job.mains[0]) if binary is not None: return binary["name"] return None def job_lib_names(self, context, job): """Return the name of all job lib binaries or an empty list. :param job: This is expected to be a Job object """ lib_ids = job.libs or [] binaries = (self.job_binary_get(context, lib_id) for lib_id in lib_ids) return [binary["name"] for binary in binaries if binary is not None] # JobBinary ops @r.wrap(r.JobBinary) def job_binary_get_all(self, context, regex_search=False, **kwargs): """Get all JobBinarys filtered by **kwargs. :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. :param kwargs: Specifies values for named fields by which to constrain the search """ return self._manager.job_binary_get_all(context, regex_search, **kwargs) @r.wrap(r.JobBinary) def job_binary_get(self, context, job_binary): """Return the JobBinary or None if it does not exist.""" return self._manager.job_binary_get(context, _get_id(job_binary)) @r.wrap(r.JobBinary) def job_binary_create(self, context, values): """Create a JobBinary from the values dictionary.""" return self._manager.job_binary_create(context, values) def job_binary_destroy(self, context, job_binary): """Destroy the JobBinary or raise if it does not exist.""" self._manager.job_binary_destroy(context, _get_id(job_binary)) @r.wrap(r.JobBinary) def job_binary_update(self, context, id, values): """Update a JobBinary from the values dictionary.""" return self._manager.job_binary_update(context, id, values) # JobBinaryInternal ops @r.wrap(r.JobBinaryInternal) def job_binary_internal_get_all(self, context, regex_search=False, **kwargs): """Get all JobBinaryInternals filtered by **kwargs. :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. 
:param kwargs: Specifies values for named fields by which to constrain the search """ return self._manager.job_binary_internal_get_all( context, regex_search, **kwargs) @r.wrap(r.JobBinaryInternal) def job_binary_internal_get(self, context, job_binary_internal): """Return the JobBinaryInternal or None if it does not exist.""" return self._manager.job_binary_internal_get( context, _get_id(job_binary_internal)) @r.wrap(r.JobBinaryInternal) def job_binary_internal_create(self, context, values): """Create a JobBinaryInternal from the values dictionary.""" return self._manager.job_binary_internal_create(context, values) def job_binary_internal_destroy(self, context, job_binary_internal_id): """Destroy the JobBinaryInternal or raise if it does not exist.""" self._manager.job_binary_internal_destroy( context, _get_id(job_binary_internal_id)) def job_binary_internal_get_raw_data(self, context, job_binary_internal_id): """Return the binary data field from a JobBinaryInternal.""" return self._manager.job_binary_internal_get_raw_data( context, job_binary_internal_id) @r.wrap(r.JobBinaryInternal) def job_binary_internal_update(self, context, job_binary_internal, values): """Update a JobBinaryInternal from the values dictionary.""" return self._manager.job_binary_internal_update( context, _get_id(job_binary_internal), values) # Events ops def cluster_provision_step_add(self, context, cluster_id, values): """Create a provisioning step assigned to cluster from values dict.""" return self._manager.cluster_provision_step_add( context, cluster_id, values) def cluster_provision_step_update(self, context, provision_step): """Update the cluster provisioning step.""" return self._manager.cluster_provision_step_update( context, provision_step) def cluster_provision_progress_update(self, context, cluster_id): """Return cluster with provision progress updated field.""" return self._manager.cluster_provision_progress_update( context, cluster_id) def cluster_event_add(self, context, provision_step, values): """Assign new event to the specified provision step.""" return self._manager.cluster_event_add( context, provision_step, values) @r.wrap(r.ClusterVerificationResource) def cluster_verification_add(self, context, cluster_id, values): """Return created verification for the specified cluster.""" return self._manager.cluster_verification_add( context, _get_id(cluster_id), values) @r.wrap(r.ClusterVerificationResource) def cluster_verification_get(self, context, verification_id): """Return verification with the specified verification_id.""" return self._manager.cluster_verification_get( context, _get_id(verification_id)) @r.wrap(r.ClusterVerificationResource) def cluster_verification_update(self, context, verification_id, values): """Return updated verification with the specified verification_id.""" return self._manager.cluster_verification_update( context, _get_id(verification_id), values) def cluster_verification_delete(self, context, verification_id): """"Delete verification with the specified id.""" return self._manager.cluster_verification_delete( context, _get_id(verification_id)) @r.wrap(r.ClusterHealthCheckResource) def cluster_health_check_add(self, context, verification_id, values): """Return created health check in the specified verification.""" return self._manager.cluster_health_check_add( context, _get_id(verification_id), values) @r.wrap(r.ClusterHealthCheckResource) def cluster_health_check_get(self, context, health_check_id): """Return health check with the specified health_check_id.""" return 
self._manager.cluster_health_check_get( context, _get_id(health_check_id)) @r.wrap(r.ClusterHealthCheckResource) def cluster_health_check_update(self, context, health_check_id, values): """Return updated health check with the specified health_check_id.""" return self._manager.cluster_health_check_update( context, _get_id(health_check_id), values) def plugin_create(self, context, values): """Return created DB entry for plugin.""" return self._manager.plugin_create(context, values) def plugin_get(self, context, name): """Return DB entry for plugin.""" return self._manager.plugin_get(context, name) def plugin_get_all(self, context): """Return DB entries for all plugins.""" return self._manager.plugin_get_all(context) def plugin_update(self, context, name, values): """Return updated DB entry for plugin.""" return self._manager.plugin_update(context, name, values) def plugin_remove(self, context, name): """Remove DB entry for plugin.""" return self._manager.plugin_remove(context, name) class RemoteApi(LocalApi): """Conductor API that does updates via RPC to the ConductorManager.""" # TODO(slukjanov): it should override _manager and only necessary functions ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/conductor/manager.py0000664000175000017500000010262200000000000020306 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
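# A minimal usage sketch of the conductor call chain (illustrative only):
# service code talks to the conductor API (LocalApi above, or RemoteApi for
# RPC), which delegates to the ConductorManager defined in this module, which
# in turn calls the db layer. The sketch assumes an existing request context
# ``ctx`` and values acceptable to the db backend:
#
#     from sahara.conductor import api as conductor_api
#
#     api = conductor_api.LocalApi()
#     job = api.job_create(ctx, {'name': 'wordcount', 'type': 'Pig',
#                                'mains': [], 'libs': []})
#     binaries = api.job_binary_get_all(ctx, name='wordcount.jar')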
"""Handles database requests from other Sahara services.""" import copy from oslo_config import cfg from sahara.conductor import resource as r from sahara.db import base as db_base from sahara.service.castellan import utils as key_manager from sahara.service.edp.utils import shares from sahara.utils import configs from sahara.utils import crypto CONF = cfg.CONF CLUSTER_DEFAULTS = { "cluster_configs": {}, "status": "undefined", "anti_affinity": [], "anti_affinity_ratio": 1, "status_description": "", "info": {}, "rollback_info": {}, "sahara_info": {}, "is_public": False, "is_protected": False } NODE_GROUP_DEFAULTS = { "node_processes": [], "node_configs": {}, "volumes_per_node": 0, "volumes_size": 0, "volumes_availability_zone": None, "volume_mount_prefix": "/volumes/disk", "volume_type": None, "boot_from_volume": False, "boot_volume_type": None, "boot_volume_availability_zone": None, "boot_volume_local_to_instance": False, "floating_ip_pool": None, "security_groups": None, "auto_security_group": False, "availability_zone": None, "is_proxy_gateway": False, "volume_local_to_instance": False, } NODE_GROUP_TEMPLATE_DEFAULTS = copy.deepcopy(NODE_GROUP_DEFAULTS) NODE_GROUP_TEMPLATE_DEFAULTS.update({"is_public": False, "is_protected": False}) INSTANCE_DEFAULTS = { "volumes": [], "storage_devices_number": 0 } DATA_SOURCE_DEFAULTS = { "credentials": {}, "is_public": False, "is_protected": False } JOB_DEFAULTS = { "is_public": False, "is_protected": False } JOB_BINARY_DEFAULTS = { "is_public": False, "is_protected": False } JOB_BINARY_INTERNAL_DEFAULTS = { "is_public": False, "is_protected": False } JOB_EXECUTION_DEFAULTS = { "is_public": False, "is_protected": False } def _apply_defaults(values, defaults): new_values = copy.deepcopy(defaults) new_values.update(values) return new_values class ConductorManager(db_base.Base): """This class aimed to conduct things. The methods in the base API for sahara-conductor are various proxy operations that allow other services to get specific work done without locally accessing the database. Additionally it performs some template-to-object copying magic. 
""" def __init__(self): super(ConductorManager, self).__init__() # Common helpers def _populate_node_groups(self, context, cluster): node_groups = cluster.get('node_groups') if not node_groups: return [] populated_node_groups = [] for node_group in node_groups: populated_node_group = self._populate_node_group(context, node_group) self._cleanup_node_group(populated_node_group) populated_node_group["tenant_id"] = context.tenant_id populated_node_groups.append( populated_node_group) return populated_node_groups def _cleanup_node_group(self, node_group): node_group.pop('id', None) node_group.pop('created_at', None) node_group.pop('updated_at', None) def _populate_node_group(self, context, node_group): node_group_merged = copy.deepcopy(NODE_GROUP_DEFAULTS) ng_tmpl_id = node_group.get('node_group_template_id') ng_tmpl = None if ng_tmpl_id: ng_tmpl = self.node_group_template_get(context, ng_tmpl_id) self._cleanup_node_group(ng_tmpl) node_group_merged.update(ng_tmpl) node_group_merged.update(node_group) if ng_tmpl: node_group_merged['node_configs'] = configs.merge_configs( ng_tmpl.get('node_configs'), node_group.get('node_configs')) return node_group_merged # Cluster ops def cluster_get(self, context, cluster, show_progress=False): """Return the cluster or None if it does not exist.""" return self.db.cluster_get(context, cluster, show_progress) def cluster_get_all(self, context, regex_search=False, **kwargs): """Get all clusters filtered by **kwargs. :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. :param kwargs: Specifies values for named fields by which to constrain the search """ return self.db.cluster_get_all(context, regex_search, **kwargs) def cluster_create(self, context, values): """Create a cluster from the values dictionary.""" # loading defaults merged_values = copy.deepcopy(CLUSTER_DEFAULTS) merged_values['tenant_id'] = context.tenant_id private_key, public_key = crypto.generate_key_pair() merged_values['management_private_key'] = private_key merged_values['management_public_key'] = public_key cluster_template_id = values.get('cluster_template_id') c_tmpl = None if cluster_template_id: c_tmpl = self.cluster_template_get(context, cluster_template_id) del c_tmpl['created_at'] del c_tmpl['updated_at'] del c_tmpl['id'] del c_tmpl['is_public'] del c_tmpl['is_protected'] del c_tmpl['tenant_id'] # updating with cluster_template values merged_values.update(c_tmpl) # updating with values provided in request merged_values.update(values) if c_tmpl: merged_values['cluster_configs'] = configs.merge_configs( c_tmpl.get('cluster_configs'), values.get('cluster_configs')) merged_values['node_groups'] = self._populate_node_groups( context, merged_values) return self.db.cluster_create(context, merged_values) def cluster_update(self, context, cluster, values): """Set the given properties on cluster and update it.""" values = copy.deepcopy(values) update_shares = values.get('shares') if update_shares: original_shares = ( self.db.cluster_get(context, cluster).get('shares', [])) updated_cluster = self.db.cluster_update(context, cluster, values) if update_shares: for share in update_shares: # Only call mount_shares if we have new shares to mount. 
# We only need one positive case to bother calling mount_shares if share not in original_shares: shares.mount_shares(r.ClusterResource(updated_cluster)) break # Any shares that were on the original, but not on the updated # list will be unmounted unmount_list = [share for share in original_shares if share not in update_shares] if len(unmount_list) > 0: shares.unmount_shares(r.ClusterResource(updated_cluster), unmount_list) return updated_cluster def cluster_destroy(self, context, cluster): """Destroy the cluster or raise if it does not exist.""" self.db.cluster_destroy(context, cluster) # Node Group ops def node_group_add(self, context, cluster, values): """Create a Node Group from the values dictionary.""" values = copy.deepcopy(values) values = self._populate_node_group(context, values) values['tenant_id'] = context.tenant_id return self.db.node_group_add(context, cluster, values) def node_group_update(self, context, node_group, values): """Set the given properties on node_group and update it.""" values = copy.deepcopy(values) self.db.node_group_update(context, node_group, values) def node_group_remove(self, context, node_group): """Destroy the node_group or raise if it does not exist.""" self.db.node_group_remove(context, node_group) # Instance ops def instance_add(self, context, node_group, values): """Create an Instance from the values dictionary.""" values = copy.deepcopy(values) values = _apply_defaults(values, INSTANCE_DEFAULTS) values['tenant_id'] = context.tenant_id return self.db.instance_add(context, node_group, values) def instance_update(self, context, instance, values): """Set the given properties on Instance and update it.""" values = copy.deepcopy(values) self.db.instance_update(context, instance, values) def instance_remove(self, context, instance): """Destroy the Instance or raise if it does not exist.""" self.db.instance_remove(context, instance) # Volumes ops def append_volume(self, context, instance, volume_id): """Append volume_id to instance.""" self.db.append_volume(context, instance, volume_id) def remove_volume(self, context, instance, volume_id): """Remove volume_id in instance.""" self.db.remove_volume(context, instance, volume_id) # Cluster Template ops def cluster_template_get(self, context, cluster_template): """Return the cluster_template or None if it does not exist.""" return self.db.cluster_template_get(context, cluster_template) def cluster_template_get_all(self, context, regex_search=False, **kwargs): """Get all cluster templates filtered by **kwargs. :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. 
:param kwargs: Specifies values for named fields by which to constrain the search """ return self.db.cluster_template_get_all(context, regex_search, **kwargs) def cluster_template_create(self, context, values): """Create a cluster_template from the values dictionary.""" values = copy.deepcopy(values) values = _apply_defaults(values, CLUSTER_DEFAULTS) values['tenant_id'] = context.tenant_id values['node_groups'] = self._populate_node_groups(context, values) return self.db.cluster_template_create(context, values) def cluster_template_destroy(self, context, cluster_template, ignore_prot_on_def=False): """Destroy the cluster_template or raise if it does not exist.""" self.db.cluster_template_destroy(context, cluster_template, ignore_prot_on_def) def cluster_template_update(self, context, id, values, ignore_prot_on_def=False): """Update a cluster_template from the values dictionary.""" values = copy.deepcopy(values) values['tenant_id'] = context.tenant_id values['id'] = id if 'node_groups' in values: values['node_groups'] = self._populate_node_groups(context, values) return self.db.cluster_template_update(context, values, ignore_prot_on_def) # Node Group Template ops def node_group_template_get(self, context, node_group_template): """Return the Node Group Template or None if it does not exist.""" return self.db.node_group_template_get(context, node_group_template) def node_group_template_get_all(self, context, regex_search=False, **kwargs): """Get all NodeGroupTemplates filtered by **kwargs. :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. :param kwargs: Specifies values for named fields by which to constrain the search """ return self.db.node_group_template_get_all(context, regex_search, **kwargs) def node_group_template_create(self, context, values): """Create a Node Group Template from the values dictionary.""" values = copy.deepcopy(values) values = _apply_defaults(values, NODE_GROUP_TEMPLATE_DEFAULTS) values['tenant_id'] = context.tenant_id return self.db.node_group_template_create(context, values) def node_group_template_destroy(self, context, node_group_template, ignore_prot_on_def=False): """Destroy the Node Group Template or raise if it does not exist.""" self.db.node_group_template_destroy(context, node_group_template, ignore_prot_on_def) def node_group_template_update(self, context, id, values, ignore_prot_on_def=False): """Update a Node Group Template from the values dictionary.""" values = copy.deepcopy(values) values['tenant_id'] = context.tenant_id values['id'] = id return self.db.node_group_template_update(context, values, ignore_prot_on_def) # Data Source ops def data_source_get(self, context, data_source): """Return the Data Source or None if it does not exist.""" return self.db.data_source_get(context, data_source) def data_source_get_all(self, context, regex_search=False, **kwargs): """Get all Data Sources filtered by **kwargs. :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. 
:param kwargs: Specifies values for named fields by which to constrain the search """ return self.db.data_source_get_all(context, regex_search, **kwargs) def data_source_count(self, context, **kwargs): """Count Data Sources filtered by **kwargs. Uses sqlalchemy "in_" clause for any tuple values Uses sqlalchemy "like" clause for any string values containing % """ return self.db.data_source_count(context, **kwargs) def data_source_create(self, context, values): """Create a Data Source from the values dictionary.""" values = copy.deepcopy(values) values = _apply_defaults(values, DATA_SOURCE_DEFAULTS) values['tenant_id'] = context.tenant_id # if credentials are being passed in, we use the key_manager # to store the password. if (values.get('credentials') and values['credentials'].get('password')): values['credentials']['password'] = key_manager.store_secret( values['credentials']['password'], context) if (values.get('credentials') and values['credentials'].get('secretkey')): values['credentials']['secretkey'] = key_manager.store_secret( values['credentials']['secretkey'], context) return self.db.data_source_create(context, values) def data_source_destroy(self, context, data_source): """Destroy the Data Source or raise if it does not exist.""" # in cases where the credentials to access the data source are # stored with the record and the external key manager is being # used, we need to delete the key from the external manager. if (CONF.use_barbican_key_manager and not CONF.use_domain_for_proxy_users): ds_record = self.data_source_get(context, data_source) if (ds_record.get('credentials') and ds_record['credentials'].get('password')): key_manager.delete_secret( ds_record['credentials']['password'], context) if CONF.use_barbican_key_manager: if (ds_record.get('credentials') and ds_record['credentials'].get('secretkey')): key_manager.delete_secret( ds_record['credentials']['secretkey'], context) return self.db.data_source_destroy(context, data_source) def data_source_update(self, context, id, values): """Update the Data Source or raise if it does not exist.""" values = copy.deepcopy(values) values["id"] = id # in cases where the credentials to access the data source are # stored with the record and the external key manager is being # used, we need to delete the old key from the manager and # create a new one. the other option here would be to retrieve # the previous key and check to see if it has changed, but it # seems less expensive to just delete the old and create a new # one. # it should be noted that the jsonschema validation ensures that # if the proxy domain is not in use then credentials must be # sent with this record. # first we retrieve the original record to get the old key # uuid, and delete it. # next we create the new key. 
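        # Illustrative summary of the rotation performed below (``old_ref``
        # and ``new_password`` are placeholders; key_manager is the castellan
        # helper module imported at the top of this file):
        #
        #     key_manager.delete_secret(old_ref, context)
        #     values['credentials']['password'] = key_manager.store_secret(
        #         new_password, context)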
if CONF.use_barbican_key_manager: ds_record = self.data_source_get(context, id) if (ds_record.get('credentials') and ds_record['credentials'].get('password') and not CONF.use_domain_for_proxy_users): key_manager.delete_secret( ds_record['credentials']['password'], context) if (values.get('credentials') and values['credentials'].get('password') and not CONF.use_domain_for_proxy_users): values['credentials']['password'] = key_manager.store_secret( values['credentials']['password'], context) if (ds_record.get('credentials') and ds_record['credentials'].get('secretkey')): key_manager.delete_secret( ds_record['credentials']['secretkey'], context) if (values.get('credentials') and values['credentials'].get('secretkey')): values['credentials']['secretkey'] = key_manager.store_secret( values['credentials']['secretkey'], context) return self.db.data_source_update(context, values) # JobExecution ops def job_execution_get(self, context, job_execution): """Return the JobExecution or None if it does not exist.""" return self.db.job_execution_get(context, job_execution) def job_execution_get_all(self, context, regex_search=False, **kwargs): """Get all JobExecutions filtered by **kwargs. kwargs key values may be the names of fields in a JobExecution plus the following special values with the indicated meaning: 'cluster.name' -- name of the Cluster referenced by the JobExecution 'job.name' -- name of the Job referenced by the JobExecution 'status' -- JobExecution['info']['status'] :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. :param kwargs: Specifies values for named fields by which to constrain the search """ return self.db.job_execution_get_all(context, regex_search, **kwargs) def job_execution_count(self, context, **kwargs): """Count number of JobExecutions filtered by **kwargs. e.g. job_execution_count(cluster_id=12, input_id=123) """ return self.db.job_execution_count(context, **kwargs) def job_execution_create(self, context, values): """Create a JobExecution from the values dictionary.""" values = copy.deepcopy(values) values = _apply_defaults(values, JOB_EXECUTION_DEFAULTS) values['tenant_id'] = context.tenant_id return self.db.job_execution_create(context, values) def job_execution_update(self, context, job_execution, values): """Updates a JobExecution from the values dictionary.""" values = copy.deepcopy(values) return self.db.job_execution_update(context, job_execution, values) def job_execution_destroy(self, context, job_execution): """Destroy the JobExecution or raise if it does not exist.""" return self.db.job_execution_destroy(context, job_execution) # Job ops def job_get(self, context, job): """Return the Job or None if it does not exist.""" return self.db.job_get(context, job) def job_get_all(self, context, regex_search=False, **kwargs): """Get all Jobs filtered by **kwargs. :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. 
:param kwargs: Specifies values for named fields by which to constrain the search """ return self.db.job_get_all(context, regex_search, **kwargs) def job_create(self, context, values): """Create a Job from the values dictionary.""" values = copy.deepcopy(values) values = _apply_defaults(values, JOB_DEFAULTS) values['tenant_id'] = context.tenant_id return self.db.job_create(context, values) def job_update(self, context, job, values): """Updates a Job from the values dictionary.""" return self.db.job_update(context, job, values) def job_destroy(self, context, job): """Destroy the Job or raise if it does not exist.""" self.db.job_destroy(context, job) # JobBinary ops def job_binary_get_all(self, context, regex_search=False, **kwargs): """Get all JobBinarys filtered by **kwargs. :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. :param kwargs: Specifies values for named fields by which to constrain the search e.g. job_binary_get_all(name='wordcount.jar') """ return self.db.job_binary_get_all(context, regex_search, **kwargs) def job_binary_get(self, context, job_binary_id): """Return the JobBinary or None if it does not exist.""" return self.db.job_binary_get(context, job_binary_id) def job_binary_create(self, context, values): """Create a JobBinary from the values dictionary.""" values = copy.deepcopy(values) values = _apply_defaults(values, JOB_BINARY_DEFAULTS) values['tenant_id'] = context.tenant_id # if credentials are being passed in, we use the key_manager # to store the password. if values.get('extra') and values['extra'].get('password'): values['extra']['password'] = key_manager.store_secret( values['extra']['password'], context) if values.get('extra') and values['extra'].get('secretkey'): values['extra']['secretkey'] = key_manager.store_secret( values['extra']['secretkey'], context) return self.db.job_binary_create(context, values) def job_binary_destroy(self, context, job_binary): """Destroy the JobBinary or raise if it does not exist.""" # in cases where the credentials to access the job binary are # stored with the record and the external key manager is being # used, we need to delete the key from the external manager. if CONF.use_barbican_key_manager: jb_record = self.job_binary_get(context, job_binary) if not CONF.use_domain_for_proxy_users: if (jb_record.get('extra') and jb_record['extra'].get('password')): key_manager.delete_secret(jb_record['extra']['password'], context) if (jb_record.get('extra') and jb_record['extra'].get('secretkey')): key_manager.delete_secret(jb_record['extra']['secretkey'], context) self.db.job_binary_destroy(context, job_binary) def job_binary_update(self, context, id, values): """Update a JobBinary from the values dictionary.""" values = copy.deepcopy(values) values['id'] = id # in cases where the credentials to access the job binary are # stored with the record and the external key manager is being # used, we need to delete the old key from the manager and # create a new one. the other option here would be to retrieve # the previous key and check to see if it has changed, but it # seems less expensive to just delete the old and create a new # one. if CONF.use_barbican_key_manager: # first we retrieve the original record to get the old key # uuid, and delete it. # next we create the new key. 
jb_record = self.job_binary_get(context, id) if not CONF.use_domain_for_proxy_users: if (jb_record.get('extra') and jb_record['extra'].get('password')): key_manager.delete_secret(jb_record['extra']['password'], context) if values.get('extra') and values['extra'].get('password'): values['extra']['password'] = key_manager.store_secret( values['extra']['password'], context) if jb_record.get('extra') and jb_record['extra'].get('secretkey'): key_manager.delete_secret(jb_record['extra']['secretkey'], context) if values.get('extra') and values['extra'].get('secretkey'): values['extra']['secretkey'] = key_manager.store_secret( values['extra']['secretkey'], context) return self.db.job_binary_update(context, values) # JobBinaryInternal ops def job_binary_internal_get_all(self, context, regex_search=False, **kwargs): """Get all JobBinaryInternals filtered by **kwargs. The JobBinaryInternals returned do not contain a data field. :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. :param kwargs: Specifies values for named fields by which to constrain the search """ return self.db.job_binary_internal_get_all(context, regex_search, **kwargs) def job_binary_internal_get(self, context, job_binary_internal_id): """Return the JobBinaryInternal or None if it does not exist The JobBinaryInternal returned does not contain a data field. """ return self.db.job_binary_internal_get(context, job_binary_internal_id) def job_binary_internal_create(self, context, values): """Create a JobBinaryInternal from the values dictionary.""" # Since values["data"] is (should be) encoded as a string # here the deepcopy of values only incs a reference count on data. # This is nice, since data could be big... 
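        # Illustrative aside: Python strings are immutable, so copy.deepcopy()
        # returns the same string object instead of duplicating the bytes,
        # e.g.
        #
        #     data = 'x' * (5 * 1024 * 1024)
        #     copy.deepcopy({'data': data})['data'] is data   # -> True
        #
        # which is why the deepcopy below stays cheap even for large payloads.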
values = copy.deepcopy(values) values = _apply_defaults(values, JOB_BINARY_INTERNAL_DEFAULTS) values['tenant_id'] = context.tenant_id return self.db.job_binary_internal_create(context, values) def job_binary_internal_destroy(self, context, job_binary_internal): """Destroy the JobBinaryInternal or raise if it does not exist.""" self.db.job_binary_internal_destroy(context, job_binary_internal) def job_binary_internal_get_raw_data(self, context, job_binary_internal_id): """Return the binary data field from a JobBinaryInternal.""" return self.db.job_binary_internal_get_raw_data( context, job_binary_internal_id) def job_binary_internal_update(self, context, id, values): """Updates a JobBinaryInternal from the values dictionary.""" return self.db.job_binary_internal_update(context, id, values) # Events ops def cluster_provision_step_add(self, context, cluster_id, values): """Create a provisioning step assigned to cluster from values dict.""" return self.db.cluster_provision_step_add(context, cluster_id, values) def cluster_provision_step_update(self, context, provision_step): """Update the cluster provisioning step.""" return self.db.cluster_provision_step_update(context, provision_step) def cluster_provision_progress_update(self, context, cluster_id): """Return cluster with provision progress updated field.""" return self.db.cluster_provision_progress_update(context, cluster_id) def cluster_event_add(self, context, provision_step, values): """Assign new event to the specified provision step.""" return self.db.cluster_event_add(context, provision_step, values) # Cluster verifications / health checks ops def cluster_verification_add(self, context, cluster_id, values): """Return created verification for the specified cluster.""" return self.db.cluster_verification_add(context, cluster_id, values) def cluster_verification_get(self, context, verification_id): """Return verification with the specified verification_id.""" return self.db.cluster_verification_get(context, verification_id) def cluster_verification_update(self, context, verification_id, values): """Return updated verification with the specified verification_id.""" return self.db.cluster_verification_update( context, verification_id, values) def cluster_verification_delete(self, context, verification_id): """"Delete verification with the specified id.""" return self.db.cluster_verification_delete(context, verification_id) def cluster_health_check_add(self, context, verification_id, values): """Return created health check in the specified verification.""" return self.db.cluster_health_check_add( context, verification_id, values) def cluster_health_check_get(self, context, health_check_id): """Return health check with the specified health_check_id.""" return self.db.cluster_health_check_get(context, health_check_id) def cluster_health_check_update(self, context, health_check_id, values): """Return updated health check with the specified health_check_id.""" return self.db.cluster_health_check_update( context, health_check_id, values) def plugin_create(self, context, values): """Return created DB entry for plugin.""" return self.db.plugin_create(context, values) def plugin_get(self, context, name): """Return DB entry for plugin.""" return self.db.plugin_get(context, name) def plugin_get_all(self, context): """Return DB entries for all plugins.""" return self.db.plugin_get_all(context) def plugin_update(self, context, name, values): """Return updated DB entry for plugin.""" return self.db.plugin_update(context, name, values) def 
plugin_remove(self, context, name): """Remove DB entry for plugin.""" return self.db.plugin_remove(context, name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/conductor/objects.py0000664000175000017500000002215600000000000020330 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """This module contains description of objects returned by the conductor. The actual objects returned are located in resource.py, which aim is to hide some necessary magic. Current module describes objects fields via docstrings and contains implementation of helper methods. """ import random from oslo_config import cfg from sahara.utils import configs from sahara.utils import remote CONF = cfg.CONF CONF.import_opt('node_domain', 'sahara.config') class Cluster(object): """An object representing Cluster. id name description tenant_id trust_id is_transient plugin_name hadoop_version cluster_configs - configs dict converted to object, see the docs for details default_image_id anti_affinity anti_affinity_ratio management_private_key management_public_key user_keypair_id status status_description info extra rollback_info - internal information required for rollback sahara_info - internal information about sahara settings provision_progress - list of ProvisionStep objects node_groups - list of NodeGroup objects cluster_template_id cluster_template - ClusterTemplate object use_autoconfig is_public is_protected domain_name """ def has_proxy_gateway(self): for ng in self.node_groups: if ng.is_proxy_gateway: return True def get_proxy_gateway_node(self): proxies = [] for ng in self.node_groups: if ng.is_proxy_gateway and ng.instances: proxies += ng.instances if proxies: return random.choice(proxies) return None @property def stack_name(self): extra = self.extra or {} return extra.get('heat_stack_name', self.name) def use_designate_feature(self): return CONF.use_designate and self.domain_name class NodeGroup(object): """An object representing Node Group. id name flavor_id image_id image_username node_processes - list of node processes node_configs - configs dict converted to object, see the docs for details volumes_per_node volumes_size volumes_availability_zone - name of Cinder availability zone where to spawn volumes volume_mount_prefix volume_type boot_from_volume - If set to True, the base image will be converted to a bootable volume. boot_volume_type boot_volume_availability_zone - name of Cinder availability zone for spawning bootable volume. boot_volume_local_to_instance - indicates if boot volume and instance should be c reated on the same physical host. 
floating_ip_pool - Floating IP Pool name used to assign Floating IPs to instances in this Node Group security_groups - List of security groups for instances in this Node Group auto_security_group - indicates if Sahara should create additional security group for the Node Group availability_zone - name of Nova availability zone where to spawn instances open_ports - List of ports that will be opened if auto_security_group is True is_proxy_gateway - indicates if nodes from this node group should be used as proxy to access other cluster nodes volume_local_to_instance - indicates if volumes and instances should be created on the same physical host count instances - list of Instance objects node_group_template_id node_group_template - NodeGroupTemplate object If node group belongs to cluster: cluster_id - parent Cluster ID cluster - parent Cluster object If node group belongs to cluster template: cluster_template_id - parent ClusterTemplate ID cluster_template - parent ClusterTemplate object """ def configuration(self): return configs.merge_configs(self.cluster.cluster_configs, self.node_configs) def get_image_id(self): return self.image_id or self.cluster.default_image_id class Instance(object): """An object representing Instance. id node_group_id - parent NodeGroup ID node_group - parent NodeGroup object instance_id - Nova instance ID instance_name internal_ip management_ip volumes storage_devices_number dns_hostname """ def hostname(self): return self.instance_name def fqdn(self): if self._use_designate_feature(): return self.dns_hostname else: return self.instance_name + '.' + CONF.node_domain def get_ip_or_dns_name(self): if self._use_designate_feature(): return self.dns_hostname else: return self.management_ip def remote(self): return remote.get_remote(self) def storage_paths(self): mp = [] for idx in range(1, self.storage_devices_number + 1): mp.append(self.node_group.volume_mount_prefix + str(idx)) if not mp: mp = ['/mnt'] return mp def _use_designate_feature(self): return CONF.use_designate and self.dns_hostname class ClusterTemplate(object): """An object representing Cluster Template. id name description cluster_configs - configs dict converted to object, see the docs for details default_image_id anti_affinity tenant_id plugin_name hadoop_version node_groups - list of NodeGroup objects is_public is_protected domain_name """ class NodeGroupTemplate(object): """An object representing Node Group Template. id name description tenant_id flavor_id image_id plugin_name hadoop_version node_processes - list of node processes node_configs - configs dict converted to object, see the docs for details volumes_per_node volumes_size volumes_availability_zone volume_mount_prefix volume_type boot_from_volume boot_volume_type boot_volume_availability_zone boot_volume_local_to_instance floating_ip_pool security_groups auto_security_group availability_zone is_proxy_gateway volume_local_to_instance is_public is_protected """ class Image(object): """An object representing Image. id tags username description """ # EDP Objects class DataSource(object): """An object representing Data Source. 
id tenant_id name description type url credentials is_public is_protected """ class JobExecution(object): """An object representing JobExecution id tenant_id job_id input_id output_id start_time end_time cluster_id info engine_job_id return_code job_configs interface extra data_source_urls is_public is_protected """ class Job(object): """An object representing Job id tenant_id name description type mains libs interface is_public is_protected """ class JobBinary(object): """An object representing JobBinary id tenant_id name description url - URLs may be the following: internal-db://URL, swift:// extra - extra may contain not only user-password but e.g. auth-token is_public is_protected """ class JobBinaryInternal(object): """An object representing JobBinaryInternal Note that the 'data' field is not returned. It uses deferred loading and must be requested explicitly with the job_binary_get_raw_data() conductor method. id tenant_id name datasize is_public is_protected """ # Events ops class ClusterProvisionStep(object): """An object representing cluster ProvisionStep id cluster_id tenant_id step_name step_type total successful events - list of Events objects assigned to the cluster """ class ClusterEvent(object): """An object representing events about cluster provision id node_group_id instance_id instance_name event_info successful step_id """ class ClusterVerification(object): """An object representing cluster verification id cluster_id status checks """ class ClusterHealthCheck(object): """An object representing health check id verification_id status description name """ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/conductor/resource.py0000664000175000017500000002352600000000000020530 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Provides means to wrap dicts coming from DB layer in objects. The Conductor can fetch only values represented by JSON. That limitation comes from Oslo RPC implementation. This module provides means to wrap a fetched value, always dictionary, into an immutable Resource object. A descendant of Resource class might provide back references to parent objects and helper methods. """ import datetime import six from sahara.conductor import objects from sahara import exceptions as ex from sahara.i18n import _ from sahara.service.edp import s3_common from sahara.swift import swift_helper from sahara.utils import types def wrap(resource_class): """A decorator wraps dict returned by a given function into a Resource.""" def decorator(func): def handle(*args, **kwargs): ret = func(*args, **kwargs) if isinstance(ret, types.Page): return types.Page([resource_class(el) for el in ret], ret.prev, ret.next) elif isinstance(ret, list): return [resource_class(el) for el in ret] elif ret: return resource_class(ret) else: return None return handle return decorator class Resource(types.FrozenDict): """Represents dictionary as an immutable object. 
Enhancing it with back references and helper methods. For instance, the following dictionary: {'first': {'a': 1, 'b': 2}, 'second': [1,2,3]} after wrapping with Resource will look like an object, let it be 'res' with the following fields: res.first res.second 'res.first' will in turn be wrapped into Resource with two fields: res.first.a == 1 res.first.b == 2 'res.second', which is a list, will be transformed into a tuple for immutability: res.second == (1,2,3) Additional helper methods could be specified in descendant classes. '_children' specifies children of that specific Resource in the following format: {refname: (child_class, backref_name)} Back reference is a reference to parent object which is injected into a Resource during wrapping. """ _resource_name = 'resource' _children = {} _filter_fields = [] _sanitize_fields = {} def __init__(self, dct): super(Resource, self).__setattr__('_initial_dict', dct) newdct = dict() for refname, entity in six.iteritems(dct): newdct[refname] = self._wrap_entity(refname, entity) super(Resource, self).__init__(newdct) def to_dict(self): """Return dictionary representing the Resource for REST API. On the way filter out fields which shouldn't be exposed. """ return self._to_dict(None) def to_wrapped_dict(self): return {self._resource_name: self.to_dict()} # Construction def _wrap_entity(self, refname, entity): if isinstance(entity, Resource): # that is a back reference return entity elif isinstance(entity, list): return self._wrap_list(refname, entity) elif isinstance(entity, dict): return self._wrap_dict(refname, entity) elif self._is_passthrough_type(entity): return entity else: raise TypeError(_("Unsupported type: %s") % type(entity).__name__) def _wrap_list(self, refname, lst): newlst = [self._wrap_entity(refname, entity) for entity in lst] return types.FrozenList(newlst) def _wrap_dict(self, refname, dct): if refname in self._children: dct = dict(dct) child_class = self._children[refname][0] backref_name = self._children[refname][1] if backref_name: dct[backref_name] = self return child_class(dct) else: return Resource(dct) def _is_passthrough_type(self, entity): return (entity is None or isinstance(entity, (six.integer_types, float, datetime.datetime, six.string_types))) # Conversion to dict def _to_dict(self, backref): dct = dict() for refname, entity in six.iteritems(self): if refname != backref and refname not in self._filter_fields: childs_backref = None if refname in self._children: childs_backref = self._children[refname][1] dct[refname] = self._entity_to_dict(entity, childs_backref) sanitize = self._sanitize_fields.get(refname) if sanitize is not None: dct[refname] = sanitize(self, dct[refname]) return dct def _entity_to_dict(self, entity, childs_backref): if isinstance(entity, Resource): return entity._to_dict(childs_backref) elif isinstance(entity, list): return self._list_to_dict(entity, childs_backref) elif entity is not None: return entity def _list_to_dict(self, lst, childs_backref): return [self._entity_to_dict(entity, childs_backref) for entity in lst] def __getattr__(self, item): return self[item] def __setattr__(self, *args): raise ex.FrozenClassError(self) class NodeGroupTemplateResource(Resource, objects.NodeGroupTemplate): _resource_name = 'node_group_template' class InstanceResource(Resource, objects.Instance): _filter_fields = ['tenant_id', 'node_group_id', "volumes"] @property def cluster_id(self): return self.node_group.cluster_id @property def cluster(self): return self.node_group.cluster class NodeGroupResource(Resource, 
objects.NodeGroup): _children = { 'instances': (InstanceResource, 'node_group'), 'node_group_template': (NodeGroupTemplateResource, None) } _filter_fields = ['tenant_id', 'cluster_id', 'cluster_template_id', 'image_username', 'open_ports'] class ClusterTemplateResource(Resource, objects.ClusterTemplate): _resource_name = 'cluster_template' _children = { 'node_groups': (NodeGroupResource, 'cluster_template') } class ClusterHealthCheckResource(Resource, objects.ClusterHealthCheck): _resource_name = 'cluster_health_check' class ClusterVerificationResource(Resource, objects.ClusterVerification): _resource_name = 'cluster_verification' _children = { 'checks': (ClusterHealthCheckResource, 'verification') } class ClusterResource(Resource, objects.Cluster): def sanitize_cluster_configs(self, cluster_configs): if 'proxy_configs' in cluster_configs: del cluster_configs['proxy_configs'] return cluster_configs _resource_name = 'cluster' _children = { 'node_groups': (NodeGroupResource, 'cluster'), 'cluster_template': (ClusterTemplateResource, None), 'verification': (ClusterVerificationResource, 'cluster') } _filter_fields = ['management_private_key', 'extra', 'rollback_info', 'sahara_info'] _sanitize_fields = {'cluster_configs': sanitize_cluster_configs} class ImageResource(Resource, objects.Image): _resource_name = 'image' @property def dict(self): return self.to_dict() @property def wrapped_dict(self): return {'image': self.dict} def _sanitize_image_properties(self, image_props): if 'links' in image_props: del image_props['links'] return image_props _sanitize_fields = {'links': _sanitize_image_properties} # EDP Resources class DataSource(Resource, objects.DataSource): _resource_name = "data_source" _filter_fields = ['credentials'] class JobExecution(Resource, objects.JobExecution): def sanitize_job_configs(self, job_configs): if 'configs' in job_configs: configs = job_configs['configs'] if swift_helper.HADOOP_SWIFT_USERNAME in configs: configs[swift_helper.HADOOP_SWIFT_USERNAME] = "" if swift_helper.HADOOP_SWIFT_PASSWORD in configs: configs[swift_helper.HADOOP_SWIFT_PASSWORD] = "" if s3_common.S3_ACCESS_KEY_CONFIG in configs: configs[s3_common.S3_ACCESS_KEY_CONFIG] = "" if s3_common.S3_SECRET_KEY_CONFIG in configs: configs[s3_common.S3_SECRET_KEY_CONFIG] = "" if 'trusts' in job_configs: del job_configs['trusts'] if 'proxy_configs' in job_configs: del job_configs['proxy_configs'] return job_configs def sanitize_info(self, info): if 'actions' in info: for d in info['actions']: if 'conf' in d: del d['conf'] return info _resource_name = "job_execution" _filter_fields = ['extra'] _sanitize_fields = {'job_configs': sanitize_job_configs, 'info': sanitize_info} # TODO(egafford): Sanitize interface ("secret" bool field on job args?) class JobBinary(Resource, objects.JobBinary): _resource_name = "job_binary" _filter_fields = ['extra'] class JobBinaryInternal(Resource, objects.JobBinaryInternal): _resource_name = "job_binary_internal" class Job(Resource, objects.Job): _resource_name = "job" _children = { 'mains': (JobBinary, None), 'libs': (JobBinary, None) } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/config.py0000664000175000017500000002162500000000000016144 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools # loading keystonemiddleware opts because sahara uses these options in code from keystonemiddleware import opts # noqa from oslo_config import cfg from oslo_log import log from sahara import exceptions as ex from sahara.i18n import _ from sahara.plugins import opts as plugins_base from sahara.service.castellan import config as castellan from sahara.service.edp.data_sources import opts as data_source from sahara.service.edp.job_binaries import opts as job_binary from sahara.topology import topology_helper from sahara.utils.notification import sender from sahara.utils.openstack import cinder from sahara.utils.openstack import keystone from sahara.utils import remote from sahara import version cli_opts = [ cfg.HostAddressOpt('host', default='0.0.0.0', help='Hostname or IP address that will be used ' 'to listen on.'), cfg.PortOpt('port', default=8386, help='Port that will be used to listen on.'), cfg.BoolOpt('log-exchange', default=False, help='Log request/response exchange details: environ, ' 'headers and bodies.') ] edp_opts = [ cfg.IntOpt('job_binary_max_KB', default=5120, help='Maximum length of job binary data in kilobytes that ' 'may be stored or retrieved in a single operation.'), cfg.IntOpt('job_canceling_timeout', default=300, help='Timeout for canceling job execution (in seconds). ' 'Sahara will try to cancel job execution during ' 'this time.'), cfg.BoolOpt('edp_internal_db_enabled', default=True, help='Use Sahara internal db to store job binaries.') ] db_opts = [ cfg.StrOpt('db_driver', default='sahara.db', help='Driver to use for database access.') ] networking_opts = [ cfg.BoolOpt('use_floating_ips', default=True, help='If set to True, Sahara will use floating IPs to ' 'communicate with instances. To make sure that all ' 'instances have floating IPs assigned, make sure ' 'that all Node Groups have "floating_ip_pool" ' 'parameter defined.'), cfg.StrOpt('node_domain', default='novalocal', help="The suffix of the node's FQDN."), cfg.BoolOpt('use_namespaces', default=False, help="Use network namespaces for communication."), cfg.BoolOpt('use_rootwrap', default=False, help="Use rootwrap facility to allow non-root users to run " "the sahara services and access private network IPs " "(only valid to use in conjunction with " "use_namespaces=True)"), cfg.StrOpt('rootwrap_command', default='sudo sahara-rootwrap /etc/sahara/rootwrap.conf', help="Rootwrap command to leverage. Use in conjunction with " "use_rootwrap=True") ] dns_opts = [ cfg.BoolOpt('use_designate', default=False, help='Use Designate for internal and external hostnames ' 'resolution'), cfg.ListOpt('nameservers', default=[], help="IP addresses of Designate nameservers. " "This is required if 'use_designate' is True") ] accessible_ip_opts = [ cfg.IPOpt('identity_ip_accessible', default=None, help='IP address of Keystone endpoint, accessible by tenant' ' machines. If not set, the results of the DNS lookup' ' performed where Sahara services are running will be' ' used.'), cfg.IPOpt('object_store_ip_accessible', default=None, help='IP address of Swift endpoint, accessible by tenant' ' machines. 
If not set, the results of the DNS lookup' ' performed where Sahara services are running will be' ' used.'), ] CONF = cfg.CONF CONF.register_cli_opts(cli_opts) CONF.register_opts(networking_opts) CONF.register_opts(edp_opts) CONF.register_opts(db_opts) CONF.register_opts(dns_opts) CONF.register_opts(accessible_ip_opts) log.register_options(CONF) sahara_default_log_levels = [ 'stevedore=INFO', 'eventlet.wsgi.server=WARN', 'paramiko=WARN', 'requests=WARN', 'neutronclient=INFO', ] log.set_defaults( default_log_levels=log.get_default_log_levels()+sahara_default_log_levels) def list_opts(): # NOTE (vgridnev): we make these import here to avoid problems # with importing unregistered options in sahara code. # As example, importing 'node_domain' in # sahara/conductor/objects.py from sahara.conductor import api from sahara import main as sahara_main from sahara.service import coordinator from sahara.service.edp import job_utils from sahara.service.heat import heat_engine from sahara.service.heat import templates from sahara.service import ntp_service from sahara.service import periodic from sahara.swift import swift_helper from sahara.utils import cluster_progress_ops as cpo from sahara.utils.openstack import base from sahara.utils.openstack import glance from sahara.utils.openstack import heat from sahara.utils.openstack import manila from sahara.utils.openstack import neutron from sahara.utils.openstack import nova from sahara.utils.openstack import swift from sahara.utils import poll_utils from sahara.utils import proxy from sahara.utils import ssh_remote return [ (None, itertools.chain(cli_opts, edp_opts, networking_opts, dns_opts, db_opts, accessible_ip_opts, plugins_base.opts, topology_helper.opts, keystone.opts, remote.ssh_opts, sahara_main.opts, job_utils.opts, periodic.periodic_opts, coordinator.coordinator_opts, ntp_service.ntp_opts, proxy.opts, cpo.event_log_opts, base.opts, heat_engine.heat_engine_opts, templates.heat_engine_opts, ssh_remote.ssh_config_options, castellan.opts, data_source.opts, job_binary.opts)), (poll_utils.timeouts.name, itertools.chain(poll_utils.timeouts_opts)), (api.conductor_group.name, itertools.chain(api.conductor_opts)), (cinder.cinder_group.name, itertools.chain(cinder.opts)), (glance.glance_group.name, itertools.chain(glance.opts)), (heat.heat_group.name, itertools.chain(heat.opts)), (manila.manila_group.name, itertools.chain(manila.opts)), (neutron.neutron_group.name, itertools.chain(neutron.opts)), (nova.nova_group.name, itertools.chain(nova.opts)), (swift.swift_group.name, itertools.chain(swift.opts)), (keystone.keystone_group.name, itertools.chain(keystone.ssl_opts)), (keystone.trustee_group.name, itertools.chain(keystone.trustee_opts)), (base.retries.name, itertools.chain(base.opts)), (swift_helper.public_endpoint_cert_group.name, itertools.chain(swift_helper.opts)), (castellan.castellan_group.name, itertools.chain(castellan.castellan_opts)), (sender.notifier_opts_group, sender.notifier_opts) ] def parse_configs(conf_files=None): try: version_string = version.version_info.version_string() CONF(project='sahara', version=version_string, default_config_files=conf_files) except cfg.RequiredOptError as roe: raise ex.ConfigurationError( _("Option '%(option)s' is required for config group '%(group)s'") % {'option': roe.opt_name, 'group': roe.group.name}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/context.py0000664000175000017500000002326400000000000016364 
0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import traceback import eventlet from eventlet.green import threading from eventlet.green import time from eventlet import greenpool from eventlet import semaphore from oslo_config import cfg from oslo_context import context from oslo_log import log as logging from sahara import exceptions as ex from sahara.i18n import _ from sahara.service import sessions CONF = cfg.CONF LOG = logging.getLogger(__name__) class Context(context.RequestContext): def __init__(self, user_id=None, tenant_id=None, auth_token=None, service_catalog=None, username=None, tenant_name=None, roles=None, is_admin=None, remote_semaphore=None, resource_uuid=None, current_instance_info=None, request_id=None, auth_plugin=None, overwrite=True, **kwargs): if kwargs: LOG.warning('Arguments dropped when creating context: ' '{args}'.format(args=kwargs)) super(Context, self).__init__(auth_token=auth_token, user_id=user_id, project_id=tenant_id, is_admin=is_admin, resource_uuid=resource_uuid, request_id=request_id, roles=roles) self.service_catalog = service_catalog self.username = username self.tenant_name = tenant_name self.remote_semaphore = remote_semaphore or semaphore.Semaphore( CONF.cluster_remote_threshold) self.auth_plugin = auth_plugin if overwrite or not hasattr(context._request_store, 'context'): self.update_store() if current_instance_info is not None: self.current_instance_info = current_instance_info else: self.current_instance_info = InstanceInfo() def clone(self): return Context( self.user_id, self.tenant_id, self.auth_token, self.service_catalog, self.username, self.tenant_name, self.roles, self.is_admin, self.remote_semaphore, self.resource_uuid, self.current_instance_info, self.request_id, self.auth_plugin, overwrite=False) def to_dict(self): d = super(Context, self).to_dict() d.update({ 'user_id': self.user_id, 'tenant_id': self.tenant_id, 'service_catalog': self.service_catalog, 'username': self.username, 'tenant_name': self.tenant_name, 'user_name': self.username, 'project_name': self.tenant_name}) return d def is_auth_capable(self): return (self.service_catalog and self.auth_token and self.project_id and self.user_id) # NOTE(adrienverge): The Context class uses the 'tenant' property # internally (inherited from oslo_context), but Sahara code often uses # 'tenant_id'. 
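    # For example (illustrative only): with ctx = Context(tenant_id='p1'),
    # ctx.tenant_id and ctx.project_id return the same value, and assigning
    # ctx.tenant_id = 'p2' updates ctx.project_id as well, so both spellings
    # stay interchangeable.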
@property def user_id(self): return self.user @user_id.setter def user_id(self, value): self.user = value @property def tenant_id(self): return self.project_id @tenant_id.setter def tenant_id(self, value): self.project_id = value def get_admin_context(): return Context(is_admin=True, overwrite=False) _CTX_STORE = threading.local() _CTX_KEY = 'current_ctx' def has_ctx(): return hasattr(_CTX_STORE, _CTX_KEY) def ctx(): if not has_ctx(): raise ex.IncorrectStateError(_("Context isn't available here")) return getattr(_CTX_STORE, _CTX_KEY) def current(): return ctx() def set_ctx(new_ctx): if not new_ctx and has_ctx(): delattr(_CTX_STORE, _CTX_KEY) if hasattr(context._request_store, 'context'): delattr(context._request_store, 'context') if new_ctx: setattr(_CTX_STORE, _CTX_KEY, new_ctx) setattr(context._request_store, 'context', new_ctx) def _wrapper(ctx, thread_description, thread_group, func, *args, **kwargs): try: set_ctx(ctx) func(*args, **kwargs) except BaseException as e: LOG.debug( "Thread {thread} failed with exception: {exception}".format( thread=thread_description, exception=e)) if thread_group and not thread_group.exc: thread_group.exc = e thread_group.exc_stacktrace = traceback.format_exc() thread_group.failed_thread = thread_description finally: if thread_group: thread_group._on_thread_exit() set_ctx(None) def spawn(thread_description, func, *args, **kwargs): eventlet.spawn(_wrapper, current().clone(), thread_description, None, func, *args, **kwargs) class ThreadGroup(object): """ThreadGroup object. It is advised to use TreadGroup as a context manager instead of instantiating and calling _wait() manually. The __exit__() guaranties to exit only after all child threads are done, even if spawning code have thrown an exception """ def __init__(self, thread_pool_size=1000): self.tg = greenpool.GreenPool(size=thread_pool_size) self.exc = None self.exc_stacktrace = None self.failed_thread = None self.threads = 0 self.cv = threading.Condition() def spawn(self, thread_description, func, *args, **kwargs): self.tg.spawn(_wrapper, current().clone(), thread_description, self, func, *args, **kwargs) with self.cv: self.threads += 1 def _on_thread_exit(self): with self.cv: self.threads -= 1 if self.threads == 0: self.cv.notifyAll() # NOTE(dmitryme): A little rationale on why we reimplemented wait(): # * Eventlet's GreenPool.wait() can hung # * Oslo's ThreadGroup.wait() can exit before all threads are done # def _wait(self): """Using of _wait() method. It is preferred to use the class as a context manager and do not use _wait() directly, see class docstring for an explanation. 
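        A minimal sketch of the preferred pattern (illustrative only;
        ``do_work`` and its arguments are placeholders, and a context must
        already be set via set_ctx() because spawn() clones it):

            with ThreadGroup() as tg:
                tg.spawn('worker-1', do_work, arg1)
                tg.spawn('worker-2', do_work, arg2)
            # on leaving the block all spawned threads have finished, and
            # any child exception is re-raised as ThreadException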
""" with self.cv: while self.threads > 0: self.cv.wait() if self.exc: raise ex.ThreadException(self.failed_thread, self.exc, self.exc_stacktrace) def __enter__(self): return self def __exit__(self, *ex): if not any(ex): self._wait() else: # If spawning code thrown an exception, it had higher priority # for us than the one thrown inside child thread (if any) try: self._wait() except Exception: # that will make __exit__ throw original exception pass def sleep(seconds=0): time.sleep(seconds) class InstanceInfo(object): def __init__(self, cluster_id=None, instance_id=None, instance_name=None, node_group_id=None, step_type=None, step_id=None): self.cluster_id = cluster_id self.instance_id = instance_id self.instance_name = instance_name self.node_group_id = node_group_id self.step_type = step_type self.step_id = step_id def set_step_type(step_type): current().current_instance_info.step_type = step_type class InstanceInfoManager(object): def __init__(self, instance_info): self.prev_instance_info = current().current_instance_info if not instance_info.step_type: instance_info.step_type = self.prev_instance_info.step_type if not instance_info.step_id: instance_info.step_id = self.prev_instance_info.step_id current().current_instance_info = instance_info def __enter__(self): pass def __exit__(self, *args): current().current_instance_info = self.prev_instance_info def set_current_cluster_id(cluster_id): current().resource_uuid = 'none, cluster: %s' % cluster_id def set_current_job_execution_id(je_id): current().resource_uuid = 'none, job_execution: %s' % je_id class SetCurrentInstanceId(object): def __init__(self, instance_id): ctx = current() self.prev_uuid = ctx.resource_uuid if ctx.resource_uuid: ctx.resource_uuid = ctx.resource_uuid.replace('none', instance_id) context.get_current().resource_uuid = ctx.resource_uuid def __enter__(self): pass def __exit__(self, *ex): current().resource_uuid = self.prev_uuid context.get_current().resource_uuid = self.prev_uuid def set_current_instance_id(instance_id): return SetCurrentInstanceId(instance_id) def get_auth_token(): cur = current() if cur.auth_plugin: try: cur.auth_token = sessions.cache().token_for_auth(cur.auth_plugin) except Exception as e: LOG.warning("Cannot update token, reason: %s", e) return cur.auth_token ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.697891 sahara-16.0.0/sahara/db/0000775000175000017500000000000000000000000014704 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/__init__.py0000664000175000017500000000121600000000000017015 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" DB abstraction for Sahara """ from sahara.db.api import * # noqa ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/api.py0000664000175000017500000004726500000000000016045 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Defines interface for DB access. Functions in this module are imported into the sahara.db namespace. Call these functions from sahara.db namespace, not the sahara.db.api namespace. All functions in this module return objects that implement a dictionary-like interface. **Related Flags** :db_backend: string to lookup in the list of LazyPluggable backends. `sqlalchemy` is the only supported backend right now. :sql_connection: string specifying the sqlalchemy connection to use, like: `mysql://user:password@localhost/sahara`. """ from oslo_config import cfg from oslo_db import api as db_api from oslo_db import options from sahara.utils import types CONF = cfg.CONF options.set_defaults(CONF) _BACKEND_MAPPING = { 'sqlalchemy': 'sahara.db.sqlalchemy.api', } IMPL = db_api.DBAPI.from_config(CONF, backend_mapping=_BACKEND_MAPPING) def setup_db(): """Set up database, create tables, etc. Return True on success, False otherwise """ return IMPL.setup_db() def drop_db(): """Drop database. Return True on success, False otherwise """ return IMPL.drop_db() # Helpers for building constraints / equality checks def constraint(**conditions): """Return a constraint object suitable for use with some updates.""" return IMPL.constraint(**conditions) def equal_any(*values): """Return an equality condition object suitable for use in a constraint. Equal_any conditions require that a model object's attribute equal any one of the given values. """ return IMPL.equal_any(*values) def not_equal(*values): """Return an inequality condition object suitable for use in a constraint. Not_equal conditions require that a model object's attribute differs from all of the given values. """ return IMPL.not_equal(*values) def to_dict(func): def decorator(*args, **kwargs): res = func(*args, **kwargs) if isinstance(res, types.Page): return types.Page([item.to_dict() for item in res], res.prev, res.next) if isinstance(res, list): return [item.to_dict() for item in res] if res: return res.to_dict() else: return None return decorator # Cluster ops def cluster_get(context, cluster, show_progress=False): """Return the cluster or None if it does not exist.""" if show_progress: cluster = IMPL.cluster_provision_progress_update(context, cluster) else: cluster = IMPL.cluster_get(context, cluster) if cluster: return cluster.to_dict(show_progress) return None @to_dict def cluster_get_all(context, regex_search=False, **kwargs): """Get all clusters filtered by **kwargs. :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. 
If False, no regex matching is done. :param kwargs: Specifies values for named fields by which to constrain the search """ return IMPL.cluster_get_all(context, regex_search, **kwargs) @to_dict def cluster_create(context, values): """Create a cluster from the values dictionary.""" return IMPL.cluster_create(context, values) @to_dict def cluster_update(context, cluster, values): """Set the given properties on cluster and update it.""" return IMPL.cluster_update(context, cluster, values) def cluster_destroy(context, cluster): """Destroy the cluster or raise if it does not exist.""" IMPL.cluster_destroy(context, cluster) # Node Group ops def node_group_add(context, cluster, values): """Create a Node Group from the values dictionary.""" return IMPL.node_group_add(context, cluster, values) def node_group_update(context, node_group, values): """Set the given properties on node_group and update it.""" IMPL.node_group_update(context, node_group, values) def node_group_remove(context, node_group): """Destroy the node_group or raise if it does not exist.""" IMPL.node_group_remove(context, node_group) # Instance ops def instance_add(context, node_group, values): """Create an Instance from the values dictionary.""" return IMPL.instance_add(context, node_group, values) def instance_update(context, instance, values): """Set the given properties on Instance and update it.""" IMPL.instance_update(context, instance, values) def instance_remove(context, instance): """Destroy the Instance or raise if it does not exist.""" IMPL.instance_remove(context, instance) # Volumes ops def append_volume(context, instance, volume_id): """Append volume_id to instance.""" IMPL.append_volume(context, instance, volume_id) def remove_volume(context, instance, volume_id): """Remove volume_id in instance.""" IMPL.remove_volume(context, instance, volume_id) # Cluster Template ops @to_dict def cluster_template_get(context, cluster_template): """Return the cluster_template or None if it does not exist.""" return IMPL.cluster_template_get(context, cluster_template) @to_dict def cluster_template_get_all(context, regex_search=False, **kwargs): """Get all cluster templates filtered by **kwargs. :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. 
:param kwargs: Specifies values for named fields by which to constrain the search """ return IMPL.cluster_template_get_all(context, regex_search, **kwargs) @to_dict def cluster_template_create(context, values): """Create a cluster_template from the values dictionary.""" return IMPL.cluster_template_create(context, values) def cluster_template_destroy(context, cluster_template, ignore_prot_on_def=False): """Destroy the cluster_template or raise if it does not exist.""" IMPL.cluster_template_destroy(context, cluster_template, ignore_prot_on_def) @to_dict def cluster_template_update(context, values, ignore_prot_on_def=False): """Update a cluster_template from the values dictionary.""" return IMPL.cluster_template_update(context, values, ignore_prot_on_def) # Node Group Template ops @to_dict def node_group_template_get(context, node_group_template): """Return the Node Group Template or None if it does not exist.""" return IMPL.node_group_template_get(context, node_group_template) @to_dict def node_group_template_get_all(context, regex_search=False, **kwargs): """Get all Node Group Templates filtered by **kwargs. :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. :param kwargs: Specifies values for named fields by which to constrain the search """ return IMPL.node_group_template_get_all(context, regex_search, **kwargs) @to_dict def node_group_template_create(context, values): """Create a Node Group Template from the values dictionary.""" return IMPL.node_group_template_create(context, values) def node_group_template_destroy(context, node_group_template, ignore_prot_on_def=False): """Destroy the Node Group Template or raise if it does not exist.""" IMPL.node_group_template_destroy(context, node_group_template, ignore_prot_on_def) @to_dict def node_group_template_update(context, node_group_template, ignore_prot_on_def=False): """Update a Node Group Template from the values in a dictionary.""" return IMPL.node_group_template_update(context, node_group_template, ignore_prot_on_def) # Data Source ops @to_dict def data_source_get(context, data_source): """Return the Data Source or None if it does not exist.""" return IMPL.data_source_get(context, data_source) @to_dict def data_source_get_all(context, regex_search=False, **kwargs): """Get all Data Sources filtered by **kwargs. :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. :param kwargs: Specifies values for named fields by which to constrain the search """ return IMPL.data_source_get_all(context, regex_search, **kwargs) def data_source_count(context, **kwargs): """Count Data Sources filtered by **kwargs. 
Uses sqlalchemy "in_" clause for any tuple values Uses sqlalchemy "like" clause for any string values containing % """ return IMPL.data_source_count(context, **kwargs) @to_dict def data_source_create(context, values): """Create a Data Source from the values dictionary.""" return IMPL.data_source_create(context, values) def data_source_destroy(context, data_source): """Destroy the Data Source or raise if it does not exist.""" IMPL.data_source_destroy(context, data_source) @to_dict def data_source_update(context, data_source): """Create a Data Source from the values dictionary.""" return IMPL.data_source_update(context, data_source) # JobExecutions ops @to_dict def job_execution_get(context, job_execution): """Return the JobExecution or None if it does not exist.""" return IMPL.job_execution_get(context, job_execution) @to_dict def job_execution_get_all(context, regex_search=False, **kwargs): """Get all JobExecutions filtered by **kwargs. kwargs key values may be the names of fields in a JobExecution plus the following special values with the indicated meaning: 'cluster.name' -- name of the Cluster referenced by the JobExecution 'job.name' -- name of the Job referenced by the JobExecution 'status' -- JobExecution['info']['status'] :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. :param kwargs: Specifies values for named fields by which to constrain the search """ return IMPL.job_execution_get_all(context, regex_search, **kwargs) def job_execution_count(context, **kwargs): """Count number of JobExecutions filtered by **kwargs. e.g. job_execution_count(cluster_id=12, input_id=123) """ return IMPL.job_execution_count(context, **kwargs) @to_dict def job_execution_create(context, values): """Create a JobExecution from the values dictionary.""" return IMPL.job_execution_create(context, values) @to_dict def job_execution_update(context, job_execution, values): """Create a JobExecution from the values dictionary.""" return IMPL.job_execution_update(context, job_execution, values) def job_execution_destroy(context, job_execution): """Destroy the JobExecution or raise if it does not exist.""" IMPL.job_execution_destroy(context, job_execution) # Job ops @to_dict def job_get(context, job): """Return the Job or None if it does not exist.""" return IMPL.job_get(context, job) @to_dict def job_get_all(context, regex_search=False, **kwargs): """Get all Jobs filtered by **kwargs. :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. :param kwargs: Specifies values for named fields by which to constrain the search """ return IMPL.job_get_all(context, regex_search, **kwargs) @to_dict def job_create(context, values): """Create a Job from the values dictionary.""" return IMPL.job_create(context, values) @to_dict def job_update(context, job, values): """Update a Job from the values dictionary.""" return IMPL.job_update(context, job, values) def job_destroy(context, job): """Destroy the Job or raise if it does not exist.""" IMPL.job_destroy(context, job) @to_dict def job_binary_get_all(context, regex_search=False, **kwargs): """Get all JobBinarys filtered by **kwargs. 
:param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. :param kwargs: Specifies values for named fields by which to constrain the search """ return IMPL.job_binary_get_all(context, regex_search, **kwargs) @to_dict def job_binary_get(context, job_binary): """Return the JobBinary or None if it does not exist.""" return IMPL.job_binary_get(context, job_binary) @to_dict def job_binary_create(context, values): """Create a JobBinary from the values dictionary.""" return IMPL.job_binary_create(context, values) def job_binary_destroy(context, job_binary): """Destroy the JobBinary or raise if it does not exist.""" IMPL.job_binary_destroy(context, job_binary) @to_dict def job_binary_update(context, values): """Update the JobBinary with the provided values""" return IMPL.job_binary_update(context, values) @to_dict def job_binary_internal_get_all(context, regex_search=False, **kwargs): """Get all JobBinaryInternals filtered by **kwargs. The JobBinaryInternals returned do not contain a data field. :param context: The context, and associated authentication, to use with this operation :param regex_search: If True, enable regex matching for filter values. See the user guide for more information on how regex matching is handled. If False, no regex matching is done. :param kwargs: Specifies values for named fields by which to constrain the search """ return IMPL.job_binary_internal_get_all(context, regex_search, **kwargs) @to_dict def job_binary_internal_get(context, job_binary_internal): """Return the JobBinaryInternal or None if it does not exist.""" return IMPL.job_binary_internal_get(context, job_binary_internal) @to_dict def job_binary_internal_create(context, values): """Create a JobBinaryInternal from the values dictionary.""" return IMPL.job_binary_internal_create(context, values) def job_binary_internal_destroy(context, job_binary_internal): """Destroy the JobBinaryInternal or raise if it does not exist.""" IMPL.job_binary_internal_destroy(context, job_binary_internal) def job_binary_internal_get_raw_data(context, job_binary_internal_id): """Return the binary data field from the specified JobBinaryInternal.""" return IMPL.job_binary_internal_get_raw_data(context, job_binary_internal_id) @to_dict def job_binary_internal_update(context, job_binary_internal, values): """Update the JobBinaryInternal with the provided values""" return IMPL.job_binary_internal_update( context, job_binary_internal, values) # Events ops def cluster_provision_step_add(context, cluster_id, values): """Create a cluster assigned ProvisionStep from the values dictionary.""" return IMPL.cluster_provision_step_add(context, cluster_id, values) def cluster_provision_step_update(context, step_id): """Updates provision step.""" return IMPL.cluster_provision_step_update(context, step_id) def cluster_provision_progress_update(context, cluster_id): """Return cluster with provision progress updated field.""" return IMPL.cluster_provision_progress_update(context, cluster_id) def cluster_event_add(context, provision_step, values): """Assign new event to the specified provision step.""" return IMPL.cluster_event_add(context, provision_step, values) # Health verifications / checks ops @to_dict def cluster_verification_add(context, cluster_id, values): """Return created verification for the specified cluster.""" return 
IMPL.cluster_verification_add(context, cluster_id, values) @to_dict def cluster_verification_get(context, verification_id): """Return verification with the specified verification_id.""" return IMPL.cluster_verification_get(context, verification_id) @to_dict def cluster_verification_update(context, verification_id, values): """Return updated verification with the specified verification_id.""" return IMPL.cluster_verification_update(context, verification_id, values) def cluster_verification_delete(context, verification_id): """"Delete verification with the specified id.""" return IMPL.cluster_verification_delete(context, verification_id) @to_dict def cluster_health_check_add(context, verification_id, values): """Return created health check in the specified verification.""" return IMPL.cluster_health_check_add(context, verification_id, values) @to_dict def cluster_health_check_get(context, health_check_id): """Return health check with the specified health_check_id.""" return IMPL.cluster_health_check_get(context, health_check_id) @to_dict def cluster_health_check_update(context, health_check_id, values): """Return updated health check with the specified health_check_id.""" return IMPL.cluster_health_check_update(context, health_check_id, values) @to_dict def plugin_create(context, values): """Return created DB entry for plugin.""" return IMPL.plugin_create(context, values) @to_dict def plugin_get(context, name): """Return DB entry for plugin.""" return IMPL.plugin_get(context, name) @to_dict def plugin_get_all(context): """Return DB entries of all plugins.""" return IMPL.plugin_get_all(context) @to_dict def plugin_update(context, name, values): """Return updated DB entry for plugin.""" return IMPL.plugin_update(context, name, values) def plugin_remove(context, name): """Remove DB entry for plugin.""" return IMPL.plugin_remove(context, name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/base.py0000664000175000017500000000156500000000000016177 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
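# ---------------------------------------------------------------------------
# Editor's note: an illustrative sketch for the DB API facade in
# sahara/db/api.py above, not part of the original tree. Per the module
# docstring, callers use these functions from the ``sahara.db`` namespace,
# and the @to_dict decorator converts backend objects into plain dicts.
# The ``status`` filter value below is a hypothetical example.
# ---------------------------------------------------------------------------
from sahara import context
from sahara import db


def list_active_clusters():
    ctx = context.ctx()
    # Exact-match filtering on the named field; pass regex_search=True to
    # enable regex matching on the supplied filter values instead.
    return db.cluster_get_all(ctx, regex_search=False, status='Active')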
"""Base class for classes that need modular database access.""" from oslo_config import cfg from oslo_utils import importutils CONF = cfg.CONF class Base(object): """DB driver is injected in the init method.""" def __init__(self): self.db = importutils.try_import(CONF.db_driver) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.697891 sahara-16.0.0/sahara/db/migration/0000775000175000017500000000000000000000000016675 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/__init__.py0000664000175000017500000000000000000000000020774 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic.ini0000664000175000017500000000170000000000000020770 0ustar00zuulzuul00000000000000# A generic, single database configuration. [alembic] # path to migration scripts script_location = sahara/db/migration/alembic_migrations # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # max length of characters to apply to the # "slug" field #truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false sqlalchemy.url = # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.697891 sahara-16.0.0/sahara/db/migration/alembic_migrations/0000775000175000017500000000000000000000000022525 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/README.md0000664000175000017500000000465500000000000024016 0ustar00zuulzuul00000000000000 The migrations in `alembic_migrations/versions` contain the changes needed to migrate between Sahara database revisions. A migration occurs by executing a script that details the changes needed to upgrade the database. The migration scripts are ordered so that multiple scripts can run sequentially. The scripts are executed by Sahara's migration wrapper which uses the Alembic library to manage the migration. Sahara supports migration from Icehouse or later. 
You can upgrade to the latest database version via: ``` $ sahara-db-manage --config-file /path/to/sahara.conf upgrade head ``` To check the current database version: ``` $ sahara-db-manage --config-file /path/to/sahara.conf current ``` To create a script to run the migration offline: ``` $ sahara-db-manage --config-file /path/to/sahara.conf upgrade head --sql ``` To run the offline migration between specific migration versions: ``` $ sahara-db-manage --config-file /path/to/sahara.conf upgrade : --sql ``` Upgrade the database incrementally: ``` $ sahara-db-manage --config-file /path/to/sahara.conf upgrade --delta <# of revs> ``` Create new revision: ``` $ sahara-db-manage --config-file /path/to/sahara.conf revision -m "description of revision" --autogenerate ``` Create a blank file: ``` $ sahara-db-manage --config-file /path/to/sahara.conf revision -m "description of revision" ``` This command does not perform any migrations, it only sets the revision. Revision may be any existing revision. Use this command carefully. ``` $ sahara-db-manage --config-file /path/to/sahara.conf stamp ``` To verify that the timeline does branch, you can run this command: ``` $ sahara-db-manage --config-file /path/to/sahara.conf check_migration ``` If the migration path does branch, you can find the branch point via: ``` $ sahara-db-manage --config-file /path/to/sahara.conf history ``` ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/env.py0000664000175000017500000000515500000000000023675 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # Based on Neutron's migration/cli.py from logging import config as c from alembic import context from oslo_utils import importutils from sqlalchemy import create_engine from sqlalchemy import pool from sahara.db.sqlalchemy import model_base importutils.try_import('sahara.db.sqlalchemy.models') # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config sahara_config = config.sahara_config # Interpret the config file for Python logging. # This line sets up loggers basically. c.fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel # target_metadata = mymodel.Base.metadata target_metadata = model_base.SaharaBase.metadata # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. 
""" context.configure(url=sahara_config.database.connection) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. """ engine = create_engine( sahara_config.database.connection, poolclass=pool.NullPool) connection = engine.connect() context.configure( connection=connection, target_metadata=target_metadata) try: with context.begin_transaction(): context.run_migrations() finally: connection.close() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/script.py.mako0000664000175000017500000000167000000000000025335 0ustar00zuulzuul00000000000000# Copyright ${create_date.year} OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641486.7058911 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/0000775000175000017500000000000000000000000024375 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/001_icehouse.py0000664000175000017500000004426400000000000027145 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Icehouse release Revision ID: 001 Revises: None Create Date: 2014-04-01 20:46:25.783444 """ # revision identifiers, used by Alembic. 
revision = '001' down_revision = None from alembic import op import sqlalchemy as sa from sahara.db.sqlalchemy import types as st MYSQL_ENGINE = 'InnoDB' MYSQL_CHARSET = 'utf8' def upgrade(): op.create_table('jobs', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=80), nullable=False), sa.Column('description', sa.Text(), nullable=True), sa.Column('type', sa.String(length=80), nullable=False), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'tenant_id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET) op.create_table('node_group_templates', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=80), nullable=False), sa.Column('description', sa.Text(), nullable=True), sa.Column('tenant_id', sa.String(length=36), nullable=True), sa.Column('flavor_id', sa.String(length=36), nullable=False), sa.Column('image_id', sa.String(length=36), nullable=True), sa.Column('plugin_name', sa.String(length=80), nullable=False), sa.Column('hadoop_version', sa.String(length=80), nullable=False), sa.Column('node_processes', st.JsonEncoded(), nullable=True), sa.Column('node_configs', st.JsonEncoded(), nullable=True), sa.Column('volumes_per_node', sa.Integer(), nullable=False), sa.Column('volumes_size', sa.Integer(), nullable=True), sa.Column('volume_mount_prefix', sa.String(length=80), nullable=True), sa.Column('floating_ip_pool', sa.String(length=36), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'tenant_id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET) op.create_table('cluster_templates', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=80), nullable=False), sa.Column('description', sa.Text(), nullable=True), sa.Column('cluster_configs', st.JsonEncoded(), nullable=True), sa.Column('default_image_id', sa.String(length=36), nullable=True), sa.Column('anti_affinity', st.JsonEncoded(), nullable=True), sa.Column('tenant_id', sa.String(length=36), nullable=True), sa.Column('neutron_management_network', sa.String(length=36), nullable=True), sa.Column('plugin_name', sa.String(length=80), nullable=False), sa.Column('hadoop_version', sa.String(length=80), nullable=False), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'tenant_id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET) op.create_table('job_binary_internal', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=80), nullable=False), sa.Column('data', st.LargeBinary(), nullable=True), sa.Column('datasize', sa.BIGINT(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'tenant_id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET) op.create_table('job_binaries', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', 
sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=80), nullable=False), sa.Column('description', sa.Text(), nullable=True), sa.Column('url', sa.String(length=256), nullable=False), sa.Column('extra', st.JsonEncoded(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'tenant_id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET) op.create_table('data_sources', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=80), nullable=False), sa.Column('description', sa.Text(), nullable=True), sa.Column('type', sa.String(length=80), nullable=False), sa.Column('url', sa.String(length=256), nullable=False), sa.Column('credentials', st.JsonEncoded(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'tenant_id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET) op.create_table('libs_association', sa.Column('Job_id', sa.String(length=36), nullable=True), sa.Column('JobBinary_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['JobBinary_id'], ['job_binaries.id'], ), sa.ForeignKeyConstraint(['Job_id'], ['jobs.id'], ), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET) op.create_table('clusters', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=80), nullable=False), sa.Column('description', sa.Text(), nullable=True), sa.Column('tenant_id', sa.String(length=36), nullable=True), sa.Column('trust_id', sa.String(length=36), nullable=True), sa.Column('is_transient', sa.Boolean(), nullable=True), sa.Column('plugin_name', sa.String(length=80), nullable=False), sa.Column('hadoop_version', sa.String(length=80), nullable=False), sa.Column('cluster_configs', st.JsonEncoded(), nullable=True), sa.Column('default_image_id', sa.String(length=36), nullable=True), sa.Column('neutron_management_network', sa.String(length=36), nullable=True), sa.Column('anti_affinity', st.JsonEncoded(), nullable=True), sa.Column('management_private_key', sa.Text(), nullable=False), sa.Column('management_public_key', sa.Text(), nullable=False), sa.Column('user_keypair_id', sa.String(length=80), nullable=True), sa.Column('status', sa.String(length=80), nullable=True), sa.Column('status_description', sa.String(length=200), nullable=True), sa.Column('info', st.JsonEncoded(), nullable=True), sa.Column('extra', st.JsonEncoded(), nullable=True), sa.Column('cluster_template_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['cluster_template_id'], ['cluster_templates.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'tenant_id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET) op.create_table('templates_relations', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=80), nullable=False), sa.Column('flavor_id', sa.String(length=36), nullable=False), sa.Column('image_id', sa.String(length=36), nullable=True), sa.Column('node_processes', st.JsonEncoded(), nullable=True), sa.Column('node_configs', st.JsonEncoded(), nullable=True), 
sa.Column('volumes_per_node', sa.Integer(), nullable=True), sa.Column('volumes_size', sa.Integer(), nullable=True), sa.Column('volume_mount_prefix', sa.String(length=80), nullable=True), sa.Column('count', sa.Integer(), nullable=False), sa.Column('cluster_template_id', sa.String(length=36), nullable=True), sa.Column('node_group_template_id', sa.String(length=36), nullable=True), sa.Column('floating_ip_pool', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['cluster_template_id'], ['cluster_templates.id'], ), sa.ForeignKeyConstraint(['node_group_template_id'], ['node_group_templates.id'], ), sa.PrimaryKeyConstraint('id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET) op.create_table('mains_association', sa.Column('Job_id', sa.String(length=36), nullable=True), sa.Column('JobBinary_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['JobBinary_id'], ['job_binaries.id'], ), sa.ForeignKeyConstraint(['Job_id'], ['jobs.id'], ), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET) op.create_table('job_executions', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=36), nullable=True), sa.Column('job_id', sa.String(length=36), nullable=True), sa.Column('input_id', sa.String(length=36), nullable=True), sa.Column('output_id', sa.String(length=36), nullable=True), sa.Column('start_time', sa.DateTime(), nullable=True), sa.Column('end_time', sa.DateTime(), nullable=True), sa.Column('cluster_id', sa.String(length=36), nullable=True), sa.Column('info', st.JsonEncoded(), nullable=True), sa.Column('progress', sa.Float(), nullable=True), sa.Column('oozie_job_id', sa.String(length=100), nullable=True), sa.Column('return_code', sa.String(length=80), nullable=True), sa.Column('job_configs', st.JsonEncoded(), nullable=True), sa.Column('extra', st.JsonEncoded(), nullable=True), sa.ForeignKeyConstraint(['cluster_id'], ['clusters.id'], ), sa.ForeignKeyConstraint(['input_id'], ['data_sources.id'], ), sa.ForeignKeyConstraint(['job_id'], ['jobs.id'], ), sa.ForeignKeyConstraint(['output_id'], ['data_sources.id'], ), sa.PrimaryKeyConstraint('id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET) op.create_table('node_groups', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=80), nullable=False), sa.Column('tenant_id', sa.String(length=36), nullable=True), sa.Column('flavor_id', sa.String(length=36), nullable=False), sa.Column('image_id', sa.String(length=36), nullable=True), sa.Column('image_username', sa.String(length=36), nullable=True), sa.Column('node_processes', st.JsonEncoded(), nullable=True), sa.Column('node_configs', st.JsonEncoded(), nullable=True), sa.Column('volumes_per_node', sa.Integer(), nullable=True), sa.Column('volumes_size', sa.Integer(), nullable=True), sa.Column('volume_mount_prefix', sa.String(length=80), nullable=True), sa.Column('count', sa.Integer(), nullable=False), sa.Column('cluster_id', sa.String(length=36), nullable=True), sa.Column('node_group_template_id', sa.String(length=36), nullable=True), sa.Column('floating_ip_pool', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint(['cluster_id'], ['clusters.id'], ), sa.ForeignKeyConstraint(['node_group_template_id'], ['node_group_templates.id'], ), sa.PrimaryKeyConstraint('id'), 
sa.UniqueConstraint('name', 'cluster_id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET) op.create_table('instances', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=36), nullable=True), sa.Column('node_group_id', sa.String(length=36), nullable=True), sa.Column('instance_id', sa.String(length=36), nullable=True), sa.Column('instance_name', sa.String(length=80), nullable=False), sa.Column('internal_ip', sa.String(length=15), nullable=True), sa.Column('management_ip', sa.String(length=15), nullable=True), sa.Column('volumes', st.JsonEncoded(), nullable=True), sa.ForeignKeyConstraint(['node_group_id'], ['node_groups.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('instance_id', 'node_group_id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/002_placeholder.py0000664000175000017500000000142100000000000027610 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """placeholder Revision ID: 002 Revises: 001 Create Date: 2014-04-01 21:04:47.941098 """ # revision identifiers, used by Alembic. revision = '002' down_revision = '001' def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/003_placeholder.py0000664000175000017500000000142100000000000027611 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """placeholder Revision ID: 003 Revises: 002 Create Date: 2014-04-01 21:05:00.270366 """ # revision identifiers, used by Alembic. revision = '003' down_revision = '002' def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/004_placeholder.py0000664000175000017500000000142100000000000027612 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """placeholder Revision ID: 004 Revises: 003 Create Date: 2014-04-01 21:04:57.627883 """ # revision identifiers, used by Alembic. revision = '004' down_revision = '003' def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/005_placeholder.py0000664000175000017500000000142100000000000027613 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """placeholder Revision ID: 005 Revises: 004 Create Date: 2014-04-01 21:04:54.928605 """ # revision identifiers, used by Alembic. revision = '005' down_revision = '004' def upgrade(): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/006_placeholder.py0000664000175000017500000000142100000000000027614 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """placeholder Revision ID: 006 Revises: 005 Create Date: 2014-04-01 21:04:52.194332 """ # revision identifiers, used by Alembic. revision = '006' down_revision = '005' def upgrade(): pass ././@PaxHeader0000000000000000000000000000020500000000000011452 xustar0000000000000000111 path=sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/007_increase_status_description_size.py 22 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/007_increase_status_description_size.p0000664000175000017500000000203300000000000033773 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """convert clusters.status_description to LongText Revision ID: 007 Revises: 006 Create Date: 2014-06-20 22:36:00.783444 """ # revision identifiers, used by Alembic. revision = '007' down_revision = '006' from alembic import op from sahara.db.sqlalchemy import types as st def upgrade(): op.alter_column('clusters', 'status_description', type_=st.LongText(), existing_nullable=True, existing_server_default=None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/008_security_groups.py0000664000175000017500000000227500000000000030612 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """add security_groups field to node groups Revision ID: 008 Revises: 007 Create Date: 2014-07-15 14:31:49.685689 """ # revision identifiers, used by Alembic. revision = '008' down_revision = '007' from alembic import op import sqlalchemy as sa from sahara.db.sqlalchemy import types as st def upgrade(): op.add_column('node_group_templates', sa.Column('security_groups', st.JsonEncoded())) op.add_column('node_groups', sa.Column('security_groups', st.JsonEncoded())) op.add_column('templates_relations', sa.Column('security_groups', st.JsonEncoded())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/009_rollback_info.py0000664000175000017500000000172500000000000030150 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """add rollback info to cluster Revision ID: 009 Revises: 008 Create Date: 2014-06-25 22:36:00.783444 """ # revision identifiers, used by Alembic. 
revision = '009' down_revision = '008' from alembic import op import sqlalchemy as sa from sahara.db.sqlalchemy import types as st def upgrade(): op.add_column('clusters', sa.Column('rollback_info', st.JsonEncoded())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/010_auto_security_groups.py0000664000175000017500000000243600000000000031632 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """add auto_security_groups flag to node group Revision ID: 010 Revises: 009 Create Date: 2014-07-21 14:31:49.685689 """ # revision identifiers, used by Alembic. revision = '010' down_revision = '009' from alembic import op import sqlalchemy as sa from sahara.db.sqlalchemy import types as st def upgrade(): op.add_column('node_group_templates', sa.Column('auto_security_group', sa.Boolean())) op.add_column('node_groups', sa.Column('auto_security_group', sa.Boolean())) op.add_column('templates_relations', sa.Column('auto_security_group', sa.Boolean())) op.add_column('node_groups', sa.Column('open_ports', st.JsonEncoded())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/011_sahara_info.py0000664000175000017500000000173200000000000027605 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """add Sahara settings info to cluster Revision ID: 011 Revises: 010 Create Date: 2014-08-26 22:36:00.783444 """ # revision identifiers, used by Alembic. revision = '011' down_revision = '010' from alembic import op import sqlalchemy as sa from sahara.db.sqlalchemy import types as st def upgrade(): op.add_column('clusters', sa.Column('sahara_info', st.JsonEncoded())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/012_availability_zone.py0000664000175000017500000000232200000000000031035 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Copyright 2014, Adrien Vergé # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """add availability_zone field to node groups Revision ID: 012 Revises: 011 Create Date: 2014-09-08 15:37:00.000000 """ # revision identifiers, used by Alembic. revision = '012' down_revision = '011' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('node_group_templates', sa.Column('availability_zone', sa.String(length=255))) op.add_column('node_groups', sa.Column('availability_zone', sa.String(length=255))) op.add_column('templates_relations', sa.Column('availability_zone', sa.String(length=255))) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/013_volumes_availability_zone.py0000664000175000017500000000251000000000000032607 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Copyright 2014, Adrien Vergé # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """add volumes_availability_zone field to node groups Revision ID: 013 Revises: 012 Create Date: 2014-09-08 15:37:00.000000 """ # revision identifiers, used by Alembic. revision = '013' down_revision = '012' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('node_group_templates', sa.Column('volumes_availability_zone', sa.String(length=255))) op.add_column('node_groups', sa.Column('volumes_availability_zone', sa.String(length=255))) op.add_column('templates_relations', sa.Column('volumes_availability_zone', sa.String(length=255))) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/014_add_volume_type.py0000664000175000017500000000237200000000000030517 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """add_volume_type Revision ID: 014 Revises: 013 Create Date: 2014-10-09 12:47:17.871520 """ # revision identifiers, used by Alembic. 
revision = '014' down_revision = '013' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('node_group_templates', sa.Column('volume_type', sa.String(length=255), nullable=True)) op.add_column('node_groups', sa.Column('volume_type', sa.String(length=255), nullable=True)) op.add_column('templates_relations', sa.Column('volume_type', sa.String(length=255), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/015_add_events_objects.py0000664000175000017500000000746600000000000031176 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """add_events_objects Revision ID: 015 Revises: 014 Create Date: 2014-11-07 15:20:21.806128 """ # revision identifiers, used by Alembic. revision = '015' down_revision = '014' from alembic import op import sqlalchemy as sa MYSQL_ENGINE = 'InnoDB' MYSQL_CHARSET = 'utf8' def upgrade(): op.create_table('cluster_provision_steps', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('cluster_id', sa.String(length=36), nullable=True), sa.Column('tenant_id', sa.String(length=36), nullable=True), sa.Column('step_name', sa.String(length=80), nullable=True), sa.Column('step_type', sa.String(length=36), nullable=True), sa.Column('completed', sa.Integer(), nullable=True), sa.Column('total', sa.Integer(), nullable=True), sa.Column('successful', sa.Boolean(), nullable=True), sa.Column('started_at', sa.DateTime(), nullable=True), sa.Column('completed_at', sa.DateTime(), nullable=True), sa.ForeignKeyConstraint(['cluster_id'], ['clusters.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('id', 'cluster_id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET) op.create_table('cluster_events', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('node_group_id', sa.String(length=36), nullable=True), sa.Column('instance_id', sa.String(length=36), nullable=True), sa.Column('instance_name', sa.String(length=80), nullable=True), sa.Column('event_info', sa.Text(), nullable=True), sa.Column('successful', sa.Boolean(), nullable=False), sa.Column('step_id', sa.String(length=36), nullable=True), sa.ForeignKeyConstraint( ['step_id'], ['cluster_provision_steps.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('id', 'step_id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/016_is_proxy_gateway.py0000664000175000017500000000216200000000000030733 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Add is_proxy_gateway Revision ID: 016 Revises: 015 Create Date: 2014-11-10 12:47:17.871520 """ # revision identifiers, used by Alembic. revision = '016' down_revision = '015' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('node_group_templates', sa.Column('is_proxy_gateway', sa.Boolean())) op.add_column('node_groups', sa.Column('is_proxy_gateway', sa.Boolean())) op.add_column('templates_relations', sa.Column('is_proxy_gateway', sa.Boolean())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/017_drop_progress.py0000664000175000017500000000154300000000000030231 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """drop progress in JobExecution Revision ID: 017 Revises: 016 Create Date: 2015-02-25 09:23:04.390388 """ # revision identifiers, used by Alembic. revision = '017' down_revision = '016' from alembic import op def upgrade(): op.drop_column('job_executions', 'progress') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/018_volume_local_to_instance.py0000664000175000017500000000222700000000000032411 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """add volume_local_to_instance flag Revision ID: 018 Revises: 017 Create Date: 2015-03-03 14:35:43.625429 """ # revision identifiers, used by Alembic. 
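# The migration scripts in this tree define only upgrade() steps. Purely as an illustration, a matching downgrade for the columns added by this revision could drop them again with op.drop_column (hypothetical, not part of the original script):
#
#     def downgrade():
#         op.drop_column('templates_relations', 'volume_local_to_instance')
#         op.drop_column('node_groups', 'volume_local_to_instance')
#         op.drop_column('node_group_templates', 'volume_local_to_instance')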
revision = '018' down_revision = '017' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('node_group_templates', sa.Column('volume_local_to_instance', sa.Boolean())) op.add_column('node_groups', sa.Column('volume_local_to_instance', sa.Boolean())) op.add_column('templates_relations', sa.Column('volume_local_to_instance', sa.Boolean())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/019_is_default_for_templates.py0000664000175000017500000000210700000000000032403 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Add is_default field for cluster and node_group templates Revision ID: 019 Revises: 018 Create Date: 2015-03-02 14:32:04.415021 """ # revision identifiers, used by Alembic. revision = '019' down_revision = '018' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('cluster_templates', sa.Column('is_default', sa.Boolean(), nullable=True)) op.add_column('node_group_templates', sa.Column('is_default', sa.Boolean(), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/020_remove_redandunt_progress_ops.py0000664000175000017500000000174700000000000033507 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """remove redandunt progress ops Revision ID: 020 Revises: 019 Create Date: 2015-02-26 15:01:41.015076 """ # revision identifiers, used by Alembic. revision = '020' down_revision = '019' from alembic import op def upgrade(): op.drop_column('cluster_provision_steps', 'completed_at') op.drop_column('cluster_provision_steps', 'completed') op.drop_column('cluster_provision_steps', 'started_at') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/021_datasource_placeholders.py0000664000175000017500000000200000000000000032200 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Add data_source_urls to job_executions to support placeholders Revision ID: 021 Revises: 020 Create Date: 2015-02-24 12:47:17.871520 """ # revision identifiers, used by Alembic. revision = '021' down_revision = '020' from alembic import op import sqlalchemy as sa from sahara.db.sqlalchemy import types as st def upgrade(): op.add_column('job_executions', sa.Column('data_source_urls', st.JsonEncoded())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/022_add_job_interface.py0000664000175000017500000000500400000000000030733 0ustar00zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """add_job_interface Revision ID: 022 Revises: 021 Create Date: 2015-01-27 15:53:22.128263 """ # revision identifiers, used by Alembic. revision = '022' down_revision = '021' from alembic import op import sqlalchemy as sa MYSQL_ENGINE = 'InnoDB' MYSQL_CHARSET = 'utf8' def upgrade(): op.create_table('job_interface_arguments', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('job_id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=36), nullable=True), sa.Column('name', sa.String(80), nullable=False), sa.Column('description', sa.Text()), sa.Column('mapping_type', sa.String(80), nullable=False), sa.Column('location', sa.Text(), nullable=False), sa.Column('value_type', sa.String(80), nullable=False), sa.Column('required', sa.Boolean(), nullable=False), sa.Column('order', sa.SmallInteger(), nullable=False), sa.Column('default', sa.Text()), sa.ForeignKeyConstraint(['job_id'], ['jobs.id']), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('job_id', 'order'), sa.UniqueConstraint('job_id', 'name'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/023_add_use_autoconfig.py0000664000175000017500000000245100000000000031157 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """add_use_autoconfig Revision ID: 023 Revises: 022 Create Date: 2015-04-24 14:51:39.582085 """ # revision identifiers, used by Alembic. revision = '023' down_revision = '022' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('clusters', sa.Column('use_autoconfig', sa.Boolean())) op.add_column('cluster_templates', sa.Column('use_autoconfig', sa.Boolean())) op.add_column('node_group_templates', sa.Column('use_autoconfig', sa.Boolean())) op.add_column('node_groups', sa.Column('use_autoconfig', sa.Boolean())) op.add_column('templates_relations', sa.Column('use_autoconfig', sa.Boolean())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/024_manila_shares.py0000664000175000017500000000254700000000000030152 0ustar00zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """manila_shares Revision ID: 024 Revises: 023 Create Date: 2015-07-20 14:51:20.275823 """ # revision identifiers, used by Alembic. revision = '024' down_revision = '023' from alembic import op import sqlalchemy as sa from sahara.db.sqlalchemy import types as st MYSQL_ENGINE = 'InnoDB' MYSQL_CHARSET = 'utf8' def upgrade(): op.add_column('node_group_templates', sa.Column('shares', st.JsonEncoded())) op.add_column('node_groups', sa.Column('shares', st.JsonEncoded())) op.add_column('templates_relations', sa.Column('shares', st.JsonEncoded())) op.add_column('clusters', sa.Column('shares', st.JsonEncoded())) op.add_column('cluster_templates', sa.Column('shares', st.JsonEncoded())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/025_increase_ip_column_size.py0000664000175000017500000000207400000000000032230 0ustar00zuulzuul00000000000000# Copyright 2015 Telles Nobrega # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
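# For reference: 45 characters is the longest textual form of an IPv6 address (an IPv4-mapped address such as 0000:0000:0000:0000:0000:ffff:255.255.255.255), hence the String(45) columns below.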
"""Increase internal_ip and management_ip column size to work with IPv6 Revision ID: 025 Revises: 024 Create Date: 2015-07-17 09:58:22.128263 """ # revision identifiers, used by Alembic. revision = '025' down_revision = '024' from alembic import op import sqlalchemy as sa def upgrade(): op.alter_column('instances', 'internal_ip', type_=sa.String(45), nullable=True) op.alter_column('instances', 'management_ip', type_=sa.String(45), nullable=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/026_add_is_public_is_protected.py0000664000175000017500000000443700000000000032673 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """add is_public and is_protected flags Revision ID: 026 Revises: 025 Create Date: 2015-06-24 12:41:52.571258 """ # revision identifiers, used by Alembic. revision = '026' down_revision = '025' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('clusters', sa.Column('is_public', sa.Boolean()),) op.add_column('cluster_templates', sa.Column('is_public', sa.Boolean())) op.add_column('node_group_templates', sa.Column('is_public', sa.Boolean())) op.add_column('data_sources', sa.Column('is_public', sa.Boolean())) op.add_column('job_executions', sa.Column('is_public', sa.Boolean())) op.add_column('jobs', sa.Column('is_public', sa.Boolean())) op.add_column('job_binary_internal', sa.Column('is_public', sa.Boolean())) op.add_column('job_binaries', sa.Column('is_public', sa.Boolean())) op.add_column('clusters', sa.Column('is_protected', sa.Boolean())) op.add_column('cluster_templates', sa.Column('is_protected', sa.Boolean())) op.add_column('node_group_templates', sa.Column('is_protected', sa.Boolean())) op.add_column('data_sources', sa.Column('is_protected', sa.Boolean())) op.add_column('job_executions', sa.Column('is_protected', sa.Boolean())) op.add_column('jobs', sa.Column('is_protected', sa.Boolean())) op.add_column('job_binary_internal', sa.Column('is_protected', sa.Boolean())) op.add_column('job_binaries', sa.Column('is_protected', sa.Boolean())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/027_rename_oozie_job_id.py0000664000175000017500000000173400000000000031326 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. """Rename oozie_job_id Revision ID: 027 Revises: 026 Create Date: 2015-07-27 14:31:02.413053 """ # revision identifiers, used by Alembic. revision = '027' down_revision = '026' from alembic import op import sqlalchemy as sa def upgrade(): op.alter_column('job_executions', 'oozie_job_id', new_column_name="engine_job_id", type_=sa.String(length=100)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/028_storage_devices_number.py0000664000175000017500000000172600000000000032064 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """add_storage_devices_number Revision ID: 028 Revises: 027 Create Date: 2015-07-20 16:56:23.562710 """ # revision identifiers, used by Alembic. revision = '028' down_revision = '027' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('instances', sa.Column('storage_devices_number', sa.Integer(), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/029_set_is_protected_on_is_default.py0000664000175000017500000000254600000000000033602 0ustar00zuulzuul00000000000000# Copyright 2015 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """set is_protected on is_default Revision ID: 029 Revises: 028 Create Date: 2015-11-4 12:41:52.571258 """ # revision identifiers, used by Alembic. revision = '029' down_revision = '028' from alembic import op import sqlalchemy as sa from sqlalchemy.sql import table, column def upgrade(): ng = table('node_group_templates', column('is_protected', sa.Boolean), column('is_default', sa.Boolean)) op.execute( ng.update().where( ng.c.is_default).values({'is_protected': True}) ) clt = table('cluster_templates', column('is_protected', sa.Boolean), column('is_default', sa.Boolean)) op.execute( clt.update().where( clt.c.is_default).values({'is_protected': True}) ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/030-health-check.py0000664000175000017500000000431500000000000027572 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """health-check Revision ID: 030 Revises: 029 Create Date: 2016-01-26 16:11:46.008367 """ # revision identifiers, used by Alembic. revision = '030' down_revision = '029' from alembic import op import sqlalchemy as sa MYSQL_ENGINE = 'InnoDB' MYSQL_CHARSET = 'utf8' def upgrade(): op.create_table( 'cluster_verifications', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('cluster_id', sa.String(length=36), nullable=True), sa.Column('status', sa.String(length=15), nullable=True), sa.ForeignKeyConstraint(['cluster_id'], ['clusters.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('id', 'cluster_id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET) op.create_table( 'cluster_health_checks', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('verification_id', sa.String(length=36), nullable=True), sa.Column('status', sa.String(length=15), nullable=True), sa.Column('description', sa.Text(), nullable=True), sa.Column('name', sa.String(length=80), nullable=True), sa.ForeignKeyConstraint( ['verification_id'], ['cluster_verifications.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('id', 'verification_id'), mysql_engine=MYSQL_ENGINE, mysql_charset=MYSQL_CHARSET) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/031_added_plugins_table.py0000664000175000017500000000270700000000000031311 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """added_plugins_table Revision ID: 031 Revises: 030 Create Date: 2016-06-21 13:32:40.151321 """ from alembic import op import sqlalchemy as sa from sahara.db.sqlalchemy import types as st # revision identifiers, used by Alembic.
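# The plugin_data table created below keeps one row of plugin and version labels per (plugin name, tenant) pair, which is what the UniqueConstraint('name', 'tenant_id') at the end of the definition enforces.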
revision = '031' down_revision = '030' def upgrade(): op.create_table( 'plugin_data', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('id', sa.String(length=36), nullable=False), sa.Column('tenant_id', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=15), nullable=False), sa.Column('plugin_labels', st.JsonEncoded(), nullable=True), sa.Column('version_labels', st.JsonEncoded(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'tenant_id') ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/032_add_domain_name.py0000664000175000017500000000220000000000000030404 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """032_add_domain_name Revision ID: 032 Revises: 031 Create Date: 2016-07-21 13:33:33.674853 """ # revision identifiers, used by Alembic. revision = '032' down_revision = '031' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('cluster_templates', sa.Column( 'domain_name', sa.String(length=255), nullable=True)) op.add_column('clusters', sa.Column( 'domain_name', sa.String(length=255), nullable=True)) op.add_column('instances', sa.Column( 'dns_hostname', sa.String(length=255), nullable=True)) ././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000119 path=sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/033_add_anti_affinity_ratio_field_to_cluster.py 22 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/033_add_anti_affinity_ratio_field_to_c0000664000175000017500000000167100000000000033713 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """033_add anti_affinity_ratio field to cluster Revision ID: 033 Revises: 032 Create Date: 2016-01-05 09:40:25.941365 """ # revision identifiers, used by Alembic. 
revision = '033' down_revision = '032' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('clusters', sa.Column('anti_affinity_ratio', sa.Integer())) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/034_boot_from_volume.py0000664000175000017500000000231700000000000030715 0ustar00zuulzuul00000000000000# Copyright 2016 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Add boot_from_volumes field for node_groups and related classes Revision ID: 034 Revises: 033 Create Date: 2018-06-06 17:36:04.749264 """ # revision identifiers, used by Alembic. revision = '034' down_revision = '033' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('node_group_templates', sa.Column('boot_from_volume', sa.Boolean(), nullable=False)) op.add_column('node_groups', sa.Column('boot_from_volume', sa.Boolean(), nullable=False)) op.add_column('templates_relations', sa.Column('boot_from_volume', sa.Boolean(), nullable=False)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/alembic_migrations/versions/035_boot_from_volume_enhancements.py0000664000175000017500000000474200000000000033452 0ustar00zuulzuul00000000000000# Copyright 2019 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """boot from volume enhancements Revision ID: 035 Revises: 034 Create Date: 2019-01-07 19:55:54.025736 """ # revision identifiers, used by Alembic. 
revision = '035' down_revision = '034' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('node_group_templates', sa.Column('boot_volume_availability_zone', sa.String(length=255), nullable=True)) op.add_column('node_group_templates', sa.Column('boot_volume_local_to_instance', sa.Boolean(), nullable=True)) op.add_column('node_group_templates', sa.Column('boot_volume_type', sa.String(length=255), nullable=True)) op.add_column('node_groups', sa.Column('boot_volume_availability_zone', sa.String(length=255), nullable=True)) op.add_column('node_groups', sa.Column('boot_volume_local_to_instance', sa.Boolean(), nullable=True)) op.add_column('node_groups', sa.Column('boot_volume_type', sa.String(length=255), nullable=True)) op.add_column('templates_relations', sa.Column('boot_volume_availability_zone', sa.String(length=255), nullable=True)) op.add_column('templates_relations', sa.Column('boot_volume_local_to_instance', sa.Boolean(), nullable=True)) op.add_column('templates_relations', sa.Column('boot_volume_type', sa.String(length=255), nullable=True)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/migration/cli.py0000664000175000017500000000662500000000000020027 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
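# Typical invocations (illustrative; the exact console-script name is defined in the project's packaging metadata and is an assumption here):
#
#     sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head
#     sahara-db-manage --config-file /etc/sahara/sahara.conf current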
import os from alembic import command as alembic_cmd from alembic import config as alembic_cfg from alembic import util as alembic_u from oslo_config import cfg from sahara.i18n import _ CONF = cfg.CONF def do_alembic_command(config, cmd, *args, **kwargs): try: getattr(alembic_cmd, cmd)(config, *args, **kwargs) except alembic_u.CommandError as e: alembic_u.err(str(e)) def do_check_migration(config, _cmd): do_alembic_command(config, 'branches') def do_upgrade_downgrade(config, cmd): if not CONF.command.revision and not CONF.command.delta: raise SystemExit(_('You must provide a revision or relative delta')) revision = CONF.command.revision if CONF.command.delta: sign = '+' if CONF.command.name == 'upgrade' else '-' revision = sign + str(CONF.command.delta) do_alembic_command(config, cmd, revision, sql=CONF.command.sql) def do_stamp(config, cmd): do_alembic_command(config, cmd, CONF.command.revision, sql=CONF.command.sql) def do_revision(config, cmd): do_alembic_command(config, cmd, message=CONF.command.message, autogenerate=CONF.command.autogenerate, sql=CONF.command.sql) def add_command_parsers(subparsers): for name in ['current', 'history', 'branches']: parser = subparsers.add_parser(name) parser.set_defaults(func=do_alembic_command) parser = subparsers.add_parser('check_migration') parser.set_defaults(func=do_check_migration) parser = subparsers.add_parser('upgrade') parser.add_argument('--delta', type=int) parser.add_argument('--sql', action='store_true') parser.add_argument('revision', nargs='?') parser.set_defaults(func=do_upgrade_downgrade) parser = subparsers.add_parser('stamp') parser.add_argument('--sql', action='store_true') parser.add_argument('revision') parser.set_defaults(func=do_stamp) parser = subparsers.add_parser('revision') parser.add_argument('-m', '--message') parser.add_argument('--autogenerate', action='store_true') parser.add_argument('--sql', action='store_true') parser.set_defaults(func=do_revision) command_opt = cfg.SubCommandOpt('command', title='Command', help='Available commands', handler=add_command_parsers) CONF.register_cli_opt(command_opt) def main(): config = alembic_cfg.Config( os.path.join(os.path.dirname(__file__), 'alembic.ini') ) config.set_main_option('script_location', 'sahara.db.migration:alembic_migrations') # attach the Sahara conf to the Alembic conf config.sahara_config = CONF CONF(project='sahara') CONF.command.func(config, CONF.command.name) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641486.7058911 sahara-16.0.0/sahara/db/sqlalchemy/0000775000175000017500000000000000000000000017046 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/sqlalchemy/__init__.py0000664000175000017500000000000000000000000021145 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/sqlalchemy/api.py0000664000175000017500000015716100000000000020204 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Implementation of SQLAlchemy backend.""" import copy import sys import threading from oslo_config import cfg from oslo_db import exception as db_exc from oslo_db.sqlalchemy import session as db_session from oslo_db.sqlalchemy import utils from oslo_log import log as logging import six import sqlalchemy as sa from sahara.db.sqlalchemy import models as m from sahara import exceptions as ex from sahara.i18n import _ from sahara.service.validations import acl as validate from sahara.utils import types LOG = logging.getLogger(__name__) CONF = cfg.CONF _FACADE = None _LOCK = threading.Lock() def _create_facade_lazily(): global _LOCK, _FACADE if _FACADE is None: with _LOCK: if _FACADE is None: _FACADE = db_session.EngineFacade.from_config(CONF, sqlite_fk=True) return _FACADE def get_engine(): facade = _create_facade_lazily() return facade.get_engine() def get_session(**kwargs): facade = _create_facade_lazily() return facade.get_session(**kwargs) def _parse_sorting_args(sort_by): if sort_by is None: sort_by = "id" if sort_by[0] == "-": return sort_by[1:], "desc" return sort_by, "asc" def _get_prev_and_next_objects(objects, limit, marker, order=None): if order == 'desc': objects.reverse() position = None if limit is None: return None, None if marker: for pos, obj in enumerate(objects): if obj.id == marker.id: position = pos break else: return None, None if position - limit >= 0: prev_marker = objects[position - limit].id else: prev_marker = None if position + limit < len(objects): next_marker = objects[position + limit].id else: next_marker = None else: if limit < len(objects): next_marker = objects[limit - 1].id else: next_marker = None prev_marker = None return prev_marker, next_marker def cleanup(): global _FACADE _FACADE = None def get_backend(): """The backend is this module itself.""" return sys.modules[__name__] def model_query(model, context, session=None, project_only=True): """Query helper. :param model: base model to query :param context: context to query under :param project_only: if present and context is user-type, then restrict query to match the context's tenant_id. """ session = session or get_session() query = session.query(model) if project_only and not context.is_admin: query = query.filter( (model.tenant_id == context.tenant_id) | getattr(model, 'is_public', False)) return query def count_query(model, context, session=None, project_only=None): """Count query helper. :param model: base model to query :param context: context to query under :param project_only: if present and context is user-type, then restrict query to match the context's project_id. """ return model_query(sa.func.count(model.id), context, session, project_only) def in_filter(query, cls, search_opts): """Add 'in' filters for specified columns. Add a sqlalchemy 'in' filter to the query for any entry in the 'search_opts' dict where the key is the name of a column in 'cls' and the value is a tuple. This allows the value of a column to be matched against multiple possible values (OR). Return the modified query and any entries in search_opts whose keys do not match columns or whose values are not tuples. 
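Example (illustrative, using the m.Cluster model from this module): in_filter(query, m.Cluster, {'name': ('a', 'b'), 'plugin_name': 'x'}) filters the query on name IN ('a', 'b') and returns {'plugin_name': 'x'} untouched as the remaining search_opts, because its value is not a tuple.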
:param query: a non-null query object :param cls: the database model class that filters will apply to :param search_opts: a dictionary whose key/value entries are interpreted as column names and search values :returns: a tuple containing the modified query and a dictionary of unused search_opts """ if not search_opts: return query, search_opts remaining = {} for k, v in six.iteritems(search_opts): if type(v) == tuple and k in cls.__table__.columns: col = cls.__table__.columns[k] query = query.filter(col.in_(v)) else: remaining[k] = v return query, remaining def like_filter(query, cls, search_opts): """Add 'like' filters for specified columns. Add a sqlalchemy 'like' filter to the query for any entry in the 'search_opts' dict where the key is the name of a column in 'cls' and the value is a string containing '%'. This allows the value of a column to be matched against simple sql string patterns using LIKE and the '%' wildcard. Return the modified query and any entries in search_opts whose keys do not match columns or whose values are not strings containing '%'. :param query: a non-null query object :param cls: the database model class the filters will apply to :param search_opts: a dictionary whose key/value entries are interpreted as column names and search patterns :returns: a tuple containing the modified query and a dictionary of unused search_opts """ if not search_opts: return query, search_opts remaining = {} for k, v in six.iteritems(search_opts): if isinstance(v, six.string_types) and ( '%' in v and k in cls.__table__.columns): col = cls.__table__.columns[k] query = query.filter(col.like(v)) else: remaining[k] = v return query, remaining def _get_regex_op(connection): db = connection.split(':')[0].split('+')[0] regexp_op_map = { 'postgresql': '~', 'mysql': 'REGEXP' } return regexp_op_map.get(db, None) def regex_filter(query, cls, regex_cols, search_opts): """Add regex filters for specified columns. Add a regex filter to the query for any entry in the 'search_opts' dict where the key is the name of a column in 'cls' and listed in 'regex_cols' and the value is a string. Return the modified query and any entries in search_opts whose keys do not match columns or whose values are not strings. This is only supported for mysql and postgres. For other databases, the query is not altered. 
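Example (illustrative): with a mysql:// connection, regex_filter(query, m.Cluster, ['name'], {'name': '^test'}) applies a REGEXP filter to the name column; on a backend with no regex operator mapped, the query is returned unchanged and every entry in search_opts is reported back as unused.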
:param query: a non-null query object :param cls: the database model class the filters will apply to :param regex_cols: a list of columns for which regex is supported :param search_opts: a dictionary whose key/value entries are interpreted as column names and search patterns :returns: a tuple containing the modified query and a dictionary of unused search_opts """ regex_op = _get_regex_op(CONF.database.connection) if not regex_op: return query, copy.copy(search_opts) remaining = {} for k, v in six.iteritems(search_opts): if isinstance(v, six.string_types) and ( k in cls.__table__.columns and k in regex_cols): col = cls.__table__.columns[k] query = query.filter(col.op(regex_op)(v)) else: remaining[k] = v return query, remaining def setup_db(): try: engine = get_engine() m.Cluster.metadata.create_all(engine) except sa.exc.OperationalError as e: LOG.warning("Database registration exception: {exc}".format(exc=e)) return False return True def drop_db(): try: engine = get_engine() m.Cluster.metadata.drop_all(engine) except Exception as e: LOG.warning("Database shutdown exception: {exc}".format(exc=e)) return False return True # Cluster ops def _cluster_get(context, session, cluster_id): query = model_query(m.Cluster, context, session) return query.filter_by(id=cluster_id).first() def cluster_get(context, cluster_id): return _cluster_get(context, get_session(), cluster_id) def cluster_get_all(context, regex_search=False, limit=None, marker=None, sort_by=None, **kwargs): sort_by, order = _parse_sorting_args(sort_by) regex_cols = ['name', 'description', 'plugin_name', 'tenant_id'] query = model_query(m.Cluster, context) if regex_search: query, kwargs = regex_filter(query, m.Cluster, regex_cols, kwargs) limit = int(limit) if limit else None marker = cluster_get(context, marker) prev_marker, next_marker = _get_prev_and_next_objects( query.filter_by(**kwargs).order_by(sort_by).all(), limit, marker, order=order) result = utils.paginate_query(query.filter_by(**kwargs), m.Cluster, limit, [sort_by], marker, order) return types.Page(result, prev_marker, next_marker) def cluster_create(context, values): values = values.copy() cluster = m.Cluster() node_groups = values.pop("node_groups", []) cluster.update(values) session = get_session() try: with session.begin(): session.add(cluster) session.flush(objects=[cluster]) for ng in node_groups: node_group = m.NodeGroup() node_group.update(ng) node_group.update({"cluster_id": cluster.id}) session.add(node_group) except db_exc.DBDuplicateEntry as e: raise ex.DBDuplicateEntry( _("Duplicate entry for object %(object)s. 
Failed on columns: " "%(columns)s") % {"object": e.value, "columns": e.columns}) return cluster_get(context, cluster.id) def cluster_update(context, cluster_id, values): session = get_session() try: with session.begin(): cluster = _cluster_get(context, session, cluster_id) if cluster is None: raise ex.NotFoundException(cluster_id, _("Cluster id '%s' not found!")) cluster.update(values) except db_exc.DBDuplicateEntry as e: raise ex.DBDuplicateEntry( _("Duplicate entry for Cluster: %s") % e.columns) return cluster def cluster_destroy(context, cluster_id): session = get_session() with session.begin(): cluster = _cluster_get(context, session, cluster_id) if not cluster: raise ex.NotFoundException(cluster_id, _("Cluster id '%s' not found!")) session.delete(cluster) # Node Group ops def _node_group_get(context, session, node_group_id): query = model_query(m.NodeGroup, context, session) return query.filter_by(id=node_group_id).first() def node_group_add(context, cluster_id, values): session = get_session() with session.begin(): cluster = _cluster_get(context, session, cluster_id) if not cluster: raise ex.NotFoundException(cluster_id, _("Cluster id '%s' not found!")) node_group = m.NodeGroup() node_group.update({"cluster_id": cluster_id}) node_group.update(values) session.add(node_group) return node_group.id def node_group_update(context, node_group_id, values): session = get_session() with session.begin(): node_group = _node_group_get(context, session, node_group_id) if not node_group: raise ex.NotFoundException(node_group_id, _("Node Group id '%s' not found!")) node_group.update(values) def node_group_remove(context, node_group_id): session = get_session() with session.begin(): node_group = _node_group_get(context, session, node_group_id) if not node_group: raise ex.NotFoundException(node_group_id, _("Node Group id '%s' not found!")) session.delete(node_group) # Instance ops def _instance_get(context, session, instance_id): query = model_query(m.Instance, context, session) return query.filter_by(id=instance_id).first() def instance_add(context, node_group_id, values): session = get_session() with session.begin(): node_group = _node_group_get(context, session, node_group_id) if not node_group: raise ex.NotFoundException(node_group_id, _("Node Group id '%s' not found!")) instance = m.Instance() instance.update({"node_group_id": node_group_id}) instance.update(values) session.add(instance) node_group = _node_group_get(context, session, node_group_id) node_group.count += 1 return instance.id def instance_update(context, instance_id, values): session = get_session() with session.begin(): instance = _instance_get(context, session, instance_id) if not instance: raise ex.NotFoundException(instance_id, _("Instance id '%s' not found!")) instance.update(values) def instance_remove(context, instance_id): session = get_session() with session.begin(): instance = _instance_get(context, session, instance_id) if not instance: raise ex.NotFoundException(instance_id, _("Instance id '%s' not found!")) session.delete(instance) node_group_id = instance.node_group_id node_group = _node_group_get(context, session, node_group_id) node_group.count -= 1 # Volumes ops def append_volume(context, instance_id, volume_id): session = get_session() with session.begin(): instance = _instance_get(context, session, instance_id) if not instance: raise ex.NotFoundException(instance_id, _("Instance id '%s' not found!")) instance.volumes.append(volume_id) def remove_volume(context, instance_id, volume_id): session = get_session() with 
session.begin(): instance = _instance_get(context, session, instance_id) if not instance: raise ex.NotFoundException(instance_id, _("Instance id '%s' not found!")) instance.volumes.remove(volume_id) # Cluster Template ops def _cluster_template_get(context, session, cluster_template_id): query = model_query(m.ClusterTemplate, context, session) return query.filter_by(id=cluster_template_id).first() def cluster_template_get(context, cluster_template_id): return _cluster_template_get(context, get_session(), cluster_template_id) def cluster_template_get_all(context, regex_search=False, marker=None, limit=None, sort_by=None, **kwargs): regex_cols = ['name', 'description', 'plugin_name', 'tenant_id'] sort_by, order = _parse_sorting_args(sort_by) query = model_query(m.ClusterTemplate, context) if regex_search: query, kwargs = regex_filter(query, m.ClusterTemplate, regex_cols, kwargs) limit = int(limit) if limit else None marker = cluster_template_get(context, marker) prev_marker, next_marker = _get_prev_and_next_objects( query.filter_by(**kwargs).order_by(sort_by).all(), limit, marker, order=order) result = utils.paginate_query(query.filter_by(**kwargs), m.ClusterTemplate, limit, [sort_by], marker, order) return types.Page(result, prev_marker, next_marker) def cluster_template_create(context, values): values = values.copy() cluster_template = m.ClusterTemplate() node_groups = values.pop("node_groups") or [] cluster_template.update(values) session = get_session() try: with session.begin(): session.add(cluster_template) session.flush(objects=[cluster_template]) for ng in node_groups: node_group = m.TemplatesRelation() node_group.update({"cluster_template_id": cluster_template.id}) node_group.update(ng) session.add(node_group) except db_exc.DBDuplicateEntry as e: raise ex.DBDuplicateEntry( _("Duplicate entry for object %(object)s. Failed on columns: " "%(columns)s") % {"object": e.value, "columns": e.columns}) return cluster_template_get(context, cluster_template.id) def cluster_template_destroy(context, cluster_template_id, ignore_prot_on_def=False): session = get_session() with session.begin(): cluster_template = _cluster_template_get(context, session, cluster_template_id) if not cluster_template: raise ex.NotFoundException( cluster_template_id, _("Cluster Template id '%s' not found!")) validate.check_tenant_for_delete(context, cluster_template) if not (cluster_template.is_default and ignore_prot_on_def): validate.check_protected_from_delete(cluster_template) session.delete(cluster_template) def cluster_template_update(context, values, ignore_prot_on_def=False): explicit_node_groups = "node_groups" in values if explicit_node_groups: node_groups = values.pop("node_groups") if node_groups is None: node_groups = [] session = get_session() cluster_template_id = values['id'] try: with session.begin(): cluster_template = (_cluster_template_get( context, session, cluster_template_id)) if not cluster_template: raise ex.NotFoundException( cluster_template_id, _("Cluster Template id '%s' not found!")) validate.check_tenant_for_update(context, cluster_template) if not (cluster_template.is_default and ignore_prot_on_def): validate.check_protected_from_update(cluster_template, values) if len(cluster_template.clusters) > 0: raise ex.UpdateFailedException( cluster_template_id, _("Cluster Template id '%s' can not be updated. 
" "It is referenced by at least one cluster.") ) cluster_template.update(values) # The flush here will cause a duplicate entry exception if # unique constraints are violated, before we go ahead and delete # the node group templates session.flush(objects=[cluster_template]) # If node_groups has not been specified, then we are # keeping the old ones so don't delete! if explicit_node_groups: model_query(m.TemplatesRelation, context, session=session).filter_by( cluster_template_id=cluster_template_id).delete() for ng in node_groups: node_group = m.TemplatesRelation() node_group.update(ng) node_group.update({"cluster_template_id": cluster_template_id}) session.add(node_group) except db_exc.DBDuplicateEntry as e: raise ex.DBDuplicateEntry( _("Duplicate entry for ClusterTemplate: %s") % e.columns) return cluster_template_get(context, cluster_template_id) # Node Group Template ops def _node_group_template_get(context, session, node_group_template_id): query = model_query(m.NodeGroupTemplate, context, session) return query.filter_by(id=node_group_template_id).first() def node_group_template_get(context, node_group_template_id): return _node_group_template_get(context, get_session(), node_group_template_id) def node_group_template_get_all(context, regex_search=False, marker=None, limit=None, sort_by=None, **kwargs): sort_by, order = _parse_sorting_args(sort_by) regex_cols = ['name', 'description', 'plugin_name', 'tenant_id'] limit = int(limit) if limit else None query = model_query(m.NodeGroupTemplate, context) if regex_search: query, kwargs = regex_filter(query, m.NodeGroupTemplate, regex_cols, kwargs) marker = node_group_template_get(context, marker) prev_marker, next_marker = _get_prev_and_next_objects( query.filter_by(**kwargs).order_by(sort_by).all(), limit, marker, order=order) result = utils.paginate_query( query.filter_by(**kwargs), m.NodeGroupTemplate, limit, [sort_by], marker, order) return types.Page(result, prev_marker, next_marker) def node_group_template_create(context, values): node_group_template = m.NodeGroupTemplate() node_group_template.update(values) session = get_session() try: with session.begin(): session.add(node_group_template) except db_exc.DBDuplicateEntry as e: raise ex.DBDuplicateEntry( _("Duplicate entry for NodeGroupTemplate: %s") % e.columns) return node_group_template def node_group_template_destroy(context, node_group_template_id, ignore_prot_on_def=False): session = get_session() with session.begin(): node_group_template = _node_group_template_get(context, session, node_group_template_id) if not node_group_template: raise ex.NotFoundException( node_group_template_id, _("Node Group Template id '%s' not found!")) validate.check_tenant_for_delete(context, node_group_template) if not (node_group_template.is_default and ignore_prot_on_def): validate.check_protected_from_delete(node_group_template) session.delete(node_group_template) def node_group_template_update(context, values, ignore_prot_on_def=False): session = get_session() try: with session.begin(): ngt_id = values['id'] ngt = _node_group_template_get(context, session, ngt_id) if not ngt: raise ex.NotFoundException( ngt_id, _("NodeGroupTemplate id '%s' not found")) validate.check_tenant_for_update(context, ngt) if not (ngt.is_default and ignore_prot_on_def): validate.check_protected_from_update(ngt, values) # Check to see that the node group template to be updated is not in # use by an existing cluster. 
for template_relationship in ngt.templates_relations: if len(template_relationship.cluster_template.clusters) > 0: raise ex.UpdateFailedException( ngt_id, _("NodeGroupTemplate id '%s' can not be updated. " "It is referenced by an existing cluster.") ) ngt.update(values) # Here we update any cluster templates that reference the # updated node group template for template_relationship in ngt.templates_relations: ct_id = template_relationship.cluster_template_id ct = cluster_template_get( context, template_relationship.cluster_template_id) node_groups = ct.node_groups ct_node_groups = [] for ng in node_groups: # Need to fill in all node groups, not just # the modified group ng_to_add = ng if ng.node_group_template_id == ngt_id: # use the updated node group template ng_to_add = ngt ng_to_add = ng_to_add.to_dict() ng_to_add.update( {"count": ng["count"], "node_group_template_id": ng.node_group_template_id}) ng_to_add.pop("updated_at", None) ng_to_add.pop("created_at", None) ng_to_add.pop("id", None) ct_node_groups.append(ng_to_add) ct_update = {"id": ct_id, "node_groups": ct_node_groups} cluster_template_update(context, ct_update, ignore_prot_on_def) except db_exc.DBDuplicateEntry as e: raise ex.DBDuplicateEntry( _("Duplicate entry for NodeGroupTemplate: %s") % e.columns) return ngt # Data Source ops def _data_source_get(context, session, data_source_id): query = model_query(m.DataSource, context, session) return query.filter_by(id=data_source_id).first() def data_source_get(context, data_source_id): return _data_source_get(context, get_session(), data_source_id) def data_source_count(context, **kwargs): """Count DataSource objects filtered by search criteria in kwargs. Entries in kwargs indicate column names and search values. 'in' filters will be used to search for any entries in kwargs that name DataSource columns and have values of type tuple. This allows column values to match multiple values (OR) 'like' filters will be used for any entries in kwargs that name DataSource columns and have string values containing '%'. This allows column values to match simple wildcards. 
Any other entries in kwargs will be searched for using filter_by() """ query = model_query(m.DataSource, context) query, kwargs = in_filter(query, m.DataSource, kwargs) query, kwargs = like_filter(query, m.DataSource, kwargs) # Use normal filter_by for remaining keys return query.filter_by(**kwargs).count() def data_source_get_all(context, regex_search=False, limit=None, marker=None, sort_by=None, **kwargs): regex_cols = ['name', 'description', 'url'] sort_by, order = _parse_sorting_args(sort_by) query = model_query(m.DataSource, context) if regex_search: query, kwargs = regex_filter(query, m.DataSource, regex_cols, kwargs) limit = int(limit) if limit else None marker = data_source_get(context, marker) prev_marker, next_marker = _get_prev_and_next_objects( query.filter_by(**kwargs).order_by(sort_by).all(), limit, marker, order=order) result = utils.paginate_query(query.filter_by(**kwargs), m.DataSource, limit, [sort_by], marker, order) return types.Page(result, prev_marker, next_marker) def data_source_create(context, values): data_source = m.DataSource() data_source.update(values) session = get_session() try: with session.begin(): session.add(data_source) except db_exc.DBDuplicateEntry as e: raise ex.DBDuplicateEntry( _("Duplicate entry for DataSource: %s") % e.columns) return data_source def data_source_destroy(context, data_source_id): session = get_session() try: with session.begin(): data_source = _data_source_get(context, session, data_source_id) if not data_source: raise ex.NotFoundException( data_source_id, _("Data Source id '%s' not found!")) validate.check_tenant_for_delete(context, data_source) validate.check_protected_from_delete(data_source) session.delete(data_source) except db_exc.DBError as e: msg = ("foreign key constraint" in six.text_type(e) and _(" on foreign key constraint") or "") raise ex.DeletionFailed(_("Data Source deletion failed%s") % msg) def data_source_update(context, values): session = get_session() try: with session.begin(): ds_id = values['id'] data_source = _data_source_get(context, session, ds_id) if not data_source: raise ex.NotFoundException( ds_id, _("DataSource id '%s' not found")) validate.check_tenant_for_update(context, data_source) validate.check_protected_from_update(data_source, values) data_source.update(values) except db_exc.DBDuplicateEntry as e: raise ex.DBDuplicateEntry( _("Duplicate entry for DataSource: %s") % e.columns) return data_source # JobExecution ops def _job_execution_get(context, session, job_execution_id): query = model_query(m.JobExecution, context, session) return query.filter_by(id=job_execution_id).first() def job_execution_get(context, job_execution_id): return _job_execution_get(context, get_session(), job_execution_id) def job_execution_get_all(context, regex_search=False, limit=None, marker=None, sort_by=None, **kwargs): """Get all JobExecutions filtered by **kwargs. kwargs key values may be the names of fields in a JobExecution plus the following special values with the indicated meaning: 'cluster.name' -- name of the Cluster referenced by the JobExecution 'job.name' -- name of the Job referenced by the JobExecution 'status' -- JobExecution['info']['status'] e.g. 
job_execution_get_all(cluster_id=12, input_id=123) job_execution_get_all(**{'cluster.name': 'test', 'job.name': 'wordcount'}) """ sort_by, order = _parse_sorting_args(sort_by) regex_cols = ['job.name', 'cluster.name'] # Remove the external fields if present, they'll # be handled with a join and filter externals = {k: kwargs.pop(k) for k in ['cluster.name', 'job.name', 'status'] if k in kwargs} # At this time, none of the fields in m.JobExecution itself # are candidates for regex search, however this code fragment # should remain in case that changes. This is the correct place # to insert regex filters on the m.JobExecution class query = model_query(m.JobExecution, context) if regex_search: query, kwargs = regex_filter(query, m.JobExecution, regex_cols, kwargs) # Filter JobExecution by the remaining kwargs. This has to be done # before application of the joins and filters because those # change the class that query.filter_by will apply to query = query.filter_by(**kwargs) # Now add the joins and filters for the externals if 'cluster.name' in externals: search_opts = {'name': externals['cluster.name']} query = query.join(m.Cluster) if regex_filter and 'cluster.name' in regex_cols: query, search_opts = regex_filter(query, m.Cluster, ['name'], search_opts) query = query.filter_by(**search_opts) if 'job.name' in externals: search_opts = {'name': externals['job.name']} query = query.join(m.Job) if regex_filter and 'job.name' in regex_cols: query, search_opts = regex_filter(query, m.Job, ['name'], search_opts) query = query.filter_by(**search_opts) res = query.order_by(sort_by).all() if order == 'desc': res.reverse() # 'info' is a JsonDictType which is stored as a string. # It would be possible to search for the substring containing # the value of 'status' in 'info', but 'info' also contains # data returned from a client and not managed by Sahara. # In the case of Oozie jobs, for example, other fields (actions) # also contain 'status'. Therefore we can't filter on it reliably # by a substring search in the query. if 'status' in externals: status = externals['status'].lower() res = [je for je in res if ( je['info'] and je['info'].get('status', '').lower() == status)] res_page = res if marker: n = None for i, je in enumerate(res): if je['id'] == marker: n = i if n: res_page = res[n:] if limit: limit = int(limit) res_page = res_page[:limit] if limit < len(res_page) else res_page marker = job_execution_get(context, marker) prev_marker, next_marker = _get_prev_and_next_objects( res, limit, marker) return types.Page(res_page, prev_marker, next_marker) def job_execution_count(context, **kwargs): query = count_query(m.JobExecution, context) return query.filter_by(**kwargs).first()[0] def _get_config_section(configs, mapping_type): if mapping_type not in configs: configs[mapping_type] = [] if mapping_type == "args" else {} return configs[mapping_type] def _merge_execution_interface(job_ex, job, execution_interface): """Merges the interface for a job execution with that of its job.""" configs = job_ex.job_configs or {} nonexistent = object() positional_args = {} for arg in job.interface: value = nonexistent typed_configs = _get_config_section(configs, arg.mapping_type) # Interface args are our first choice for the value. if arg.name in execution_interface: value = execution_interface[arg.name] else: # If a default exists, we can use that, but... if arg.default is not None: value = arg.default # We should prefer an argument passed through the # job_configs that maps to the same location. 
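                    # (for positional "args" the job_configs section is a
                    # list rather than a dict, so there is no per-location
                    # value to prefer and the default, if any, is kept)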
if arg.mapping_type != "args": value = typed_configs.get(arg.location, value) if value is not nonexistent: if arg.mapping_type != "args": typed_configs[arg.location] = value else: positional_args[int(arg.location)] = value if positional_args: positional_args = [positional_args[i] for i in range(len(positional_args))] configs["args"] = positional_args + configs["args"] if configs and not job_ex.job_configs: job_ex.job_configs = configs def job_execution_create(context, values): session = get_session() execution_interface = values.pop('interface', {}) job_ex = m.JobExecution() job_ex.update(values) try: with session.begin(): job_ex.interface = [] job = _job_get(context, session, job_ex.job_id) if job.interface: _merge_execution_interface(job_ex, job, execution_interface) session.add(job_ex) except db_exc.DBDuplicateEntry as e: raise ex.DBDuplicateEntry( _("Duplicate entry for JobExecution: %s") % e.columns) return job_ex def job_execution_update(context, job_execution_id, values): session = get_session() with session.begin(): job_ex = _job_execution_get(context, session, job_execution_id) if not job_ex: raise ex.NotFoundException(job_execution_id, _("JobExecution id '%s' not found!")) job_ex.update(values) session.add(job_ex) return job_ex def job_execution_destroy(context, job_execution_id): session = get_session() with session.begin(): job_ex = _job_execution_get(context, session, job_execution_id) if not job_ex: raise ex.NotFoundException(job_execution_id, _("JobExecution id '%s' not found!")) session.delete(job_ex) # Job ops def _job_get(context, session, job_id): query = model_query(m.Job, context, session) return query.filter_by(id=job_id).first() def job_get(context, job_id): return _job_get(context, get_session(), job_id) def job_get_all(context, regex_search=False, limit=None, marker=None, sort_by=None, **kwargs): regex_cols = ['name', 'description'] sort_by, order = _parse_sorting_args(sort_by) query = model_query(m.Job, context) if regex_search: query, kwargs = regex_filter(query, m.Job, regex_cols, kwargs) limit = int(limit) if limit else None marker = job_get(context, marker) prev_marker, next_marker = _get_prev_and_next_objects( query.filter_by(**kwargs).order_by(sort_by).all(), limit, marker, order=order) result = utils.paginate_query(query.filter_by(**kwargs), m.Job, limit, [sort_by], marker, order) return types.Page(result, prev_marker, next_marker) def _append_job_binaries(context, session, from_list, to_list): for job_binary_id in from_list: job_binary = model_query( m.JobBinary, context, session).filter_by(id=job_binary_id).first() if job_binary is not None: to_list.append(job_binary) def _append_interface(context, from_list, to_list): for order, argument_values in enumerate(from_list): argument_values['tenant_id'] = context.tenant_id argument_values['order'] = order argument = m.JobInterfaceArgument() argument.update(argument_values) to_list.append(argument) def job_create(context, values): mains = values.pop("mains", []) libs = values.pop("libs", []) interface = values.pop("interface", []) session = get_session() try: with session.begin(): job = m.Job() job.update(values) # These are 'lazy' objects. 
The initialization below # is needed here because it provides libs, mains, and # interface to be initialized within a session even if # the lists are empty job.mains = [] job.libs = [] job.interface = [] _append_job_binaries(context, session, mains, job.mains) _append_job_binaries(context, session, libs, job.libs) _append_interface(context, interface, job.interface) session.add(job) except db_exc.DBDuplicateEntry as e: raise ex.DBDuplicateEntry( _("Duplicate entry for Job: %s") % e.columns) return job def job_update(context, job_id, values): session = get_session() try: with session.begin(): job = _job_get(context, session, job_id) if not job: raise ex.NotFoundException(job_id, _("Job id '%s' not found!")) validate.check_tenant_for_update(context, job) validate.check_protected_from_update(job, values) job.update(values) session.add(job) except db_exc.DBDuplicateEntry as e: raise ex.DBDuplicateEntry( _("Duplicate entry for Job: %s") % e.columns) return job def job_destroy(context, job_id): session = get_session() try: with session.begin(): job = _job_get(context, session, job_id) if not job: raise ex.NotFoundException(job_id, _("Job id '%s' not found!")) validate.check_tenant_for_delete(context, job) validate.check_protected_from_delete(job) session.delete(job) except db_exc.DBError as e: msg = ("foreign key constraint" in six.text_type(e) and _(" on foreign key constraint") or "") raise ex.DeletionFailed(_("Job deletion failed%s") % msg) # JobBinary ops def _job_binary_get(context, session, job_binary_id): query = model_query(m.JobBinary, context, session) return query.filter_by(id=job_binary_id).first() def job_binary_get_all(context, regex_search=False, limit=None, marker=None, sort_by=None, **kwargs): sort_by, order = _parse_sorting_args(sort_by) regex_cols = ['name', 'description', 'url'] query = model_query(m.JobBinary, context) if regex_search: query, kwargs = regex_filter(query, m.JobBinary, regex_cols, kwargs) limit = int(limit) if limit else None marker = job_binary_get(context, marker) prev_marker, next_marker = _get_prev_and_next_objects( query.filter_by(**kwargs).order_by(sort_by).all(), limit, marker, order=order) result = utils.paginate_query(query.filter_by(**kwargs), m.JobBinary, limit, [sort_by], marker, order) return types.Page(result, prev_marker, next_marker) def job_binary_get(context, job_binary_id): """Returns a JobBinary object that does not contain a data field The data column uses deferred loading. """ return _job_binary_get(context, get_session(), job_binary_id) def job_binary_create(context, values): """Returns a JobBinary that does not contain a data field The data column uses deferred loading. 
""" job_binary = m.JobBinary() job_binary.update(values) session = get_session() try: with session.begin(): session.add(job_binary) except db_exc.DBDuplicateEntry as e: raise ex.DBDuplicateEntry( _("Duplicate entry for JobBinary: %s") % e.columns) return job_binary def job_binary_update(context, values): """Returns a JobBinary updated with the provided values.""" jb_id = values["id"] session = get_session() try: with session.begin(): jb = _job_binary_get(context, session, jb_id) if not jb: raise ex.NotFoundException( jb_id, _("JobBinary id '%s' not found")) validate.check_tenant_for_update(context, jb) validate.check_protected_from_update(jb, values) # We do not want to update the url for internal binaries new_url = values.get("url", None) if new_url and "internal-db://" in jb["url"]: if jb["url"] != new_url: raise ex.UpdateFailedException( jb_id, _("The url for JobBinary Id '%s' can not " "be updated because it is an internal-db url.")) jobs = job_execution_get_all(context) pending_jobs = [job for job in jobs if job.info["status"] == "PENDING"] if len(pending_jobs) > 0: for job in pending_jobs: if _check_job_binary_referenced( context, session, jb_id, job.job_id): raise ex.UpdateFailedException( jb_id, _("JobBinary Id '%s' is used in a PENDING job " "and can not be updated.")) jb.update(values) except db_exc.DBDuplicateEntry as e: raise ex.DBDuplicateEntry( _("Duplicate entry for JobBinary: %s") % e.columns) return jb def _check_job_binary_referenced(ctx, session, job_binary_id, job_id=None): args = {"JobBinary_id": job_binary_id} if job_id: args["Job_id"] = job_id mains = model_query(m.mains_association, ctx, session, project_only=False).filter_by(**args) libs = model_query(m.libs_association, ctx, session, project_only=False).filter_by(**args) return mains.first() is not None or libs.first() is not None def job_binary_destroy(context, job_binary_id): session = get_session() with session.begin(): job_binary = _job_binary_get(context, session, job_binary_id) if not job_binary: raise ex.NotFoundException(job_binary_id, _("JobBinary id '%s' not found!")) validate.check_tenant_for_delete(context, job_binary) validate.check_protected_from_delete(job_binary) if _check_job_binary_referenced(context, session, job_binary_id): raise ex.DeletionFailed( _("JobBinary is referenced and cannot be deleted")) session.delete(job_binary) # JobBinaryInternal ops def _job_binary_internal_get(context, session, job_binary_internal_id): query = model_query(m.JobBinaryInternal, context, session) return query.filter_by(id=job_binary_internal_id).first() def job_binary_internal_get_all(context, regex_search=False, limit=None, marker=None, sort_by=None, **kwargs): """Returns JobBinaryInternal objects that do not contain a data field The data column uses deferred loading. 
""" sort_by, order = _parse_sorting_args(sort_by) regex_cols = ['name'] query = model_query(m.JobBinaryInternal, context) if regex_search: query, kwargs = regex_filter(query, m.JobBinaryInternal, regex_cols, kwargs) limit = int(limit) if limit else None marker = job_binary_internal_get(context, marker) prev_marker, next_marker = _get_prev_and_next_objects( query.filter_by(**kwargs).order_by(sort_by).all(), limit, marker, order=order) result = utils.paginate_query(query.filter_by(**kwargs), m.JobBinaryInternal, limit, [sort_by], marker, order) return types.Page(result, prev_marker, next_marker) def job_binary_internal_get(context, job_binary_internal_id): """Returns a JobBinaryInternal object that does not contain a data field The data column uses deferred loading. """ return _job_binary_internal_get(context, get_session(), job_binary_internal_id) def job_binary_internal_get_raw_data(context, job_binary_internal_id): """Returns only the data field for the specified JobBinaryInternal.""" query = model_query(m.JobBinaryInternal, context) res = query.filter_by(id=job_binary_internal_id).first() if res is not None: datasize_KB = res.datasize / 1024.0 if datasize_KB > CONF.job_binary_max_KB: raise ex.DataTooBigException( round(datasize_KB, 1), CONF.job_binary_max_KB, _("Size of internal binary (%(size)sKB) is greater than the " "maximum (%(maximum)sKB)")) # This assignment is sufficient to load the deferred column res = res.data return res def job_binary_internal_create(context, values): """Returns a JobBinaryInternal that does not contain a data field The data column uses deferred loading. """ values["datasize"] = len(values["data"]) datasize_KB = values["datasize"] / 1024.0 if datasize_KB > CONF.job_binary_max_KB: raise ex.DataTooBigException( round(datasize_KB, 1), CONF.job_binary_max_KB, _("Size of internal binary (%(size)sKB) is greater " "than the maximum (%(maximum)sKB)")) job_binary_int = m.JobBinaryInternal() job_binary_int.update(values) session = get_session() try: with session.begin(): session.add(job_binary_int) except db_exc.DBDuplicateEntry as e: raise ex.DBDuplicateEntry( _("Duplicate entry for JobBinaryInternal: %s") % e.columns) return job_binary_internal_get(context, job_binary_int.id) def job_binary_internal_destroy(context, job_binary_internal_id): session = get_session() with session.begin(): job_binary_internal = _job_binary_internal_get(context, session, job_binary_internal_id) if not job_binary_internal: raise ex.NotFoundException( job_binary_internal_id, _("JobBinaryInternal id '%s' not found!")) validate.check_tenant_for_delete(context, job_binary_internal) validate.check_protected_from_delete(job_binary_internal) session.delete(job_binary_internal) def job_binary_internal_update(context, job_binary_internal_id, values): """Returns a JobBinary updated with the provided values.""" session = get_session() try: with session.begin(): j_b_i = _job_binary_internal_get( context, session, job_binary_internal_id) if not j_b_i: raise ex.NotFoundException( job_binary_internal_id, _("JobBinaryInternal id '%s' not found!")) validate.check_tenant_for_update(context, j_b_i) validate.check_protected_from_update(j_b_i, values) j_b_i.update(values) except db_exc.DBDuplicateEntry as e: raise ex.DBDuplicateEntry( _("Duplicate entry for JobBinaryInternal: %s") % e.columns) return j_b_i # Events ops def _cluster_provision_step_get(context, session, provision_step_id): query = model_query(m.ClusterProvisionStep, context, session) return query.filter_by(id=provision_step_id).first() def 
_cluster_provision_step_update(context, session, step_id): step = _cluster_provision_step_get(context, session, step_id) if step is None: raise ex.NotFoundException( step_id, _("Cluster Provision Step id '%s' not found!")) if step.successful is not None: return if len(step.events) == step.total: for event in step.events: session.delete(event) step.update({'successful': True}) def cluster_provision_step_add(context, cluster_id, values): session = get_session() with session.begin(): cluster = _cluster_get(context, session, cluster_id) if not cluster: raise ex.NotFoundException(cluster_id, _("Cluster id '%s' not found!")) provision_step = m.ClusterProvisionStep() values['cluster_id'] = cluster_id values['tenant_id'] = context.tenant_id provision_step.update(values) session.add(provision_step) return provision_step.id def cluster_provision_step_update(context, step_id): if CONF.disable_event_log: return session = get_session() with session.begin(): _cluster_provision_step_update(context, session, step_id) def cluster_provision_progress_update(context, cluster_id): if CONF.disable_event_log: return _cluster_get(context, get_session(), cluster_id) session = get_session() with session.begin(): cluster = _cluster_get(context, session, cluster_id) if cluster is None: raise ex.NotFoundException(cluster_id, _("Cluster id '%s' not found!")) for step in cluster.provision_progress: if step.successful is None: _cluster_provision_step_update(context, session, step.id) result_cluster = _cluster_get(context, session, cluster_id) return result_cluster def cluster_event_add(context, step_id, values): session = get_session() with session.begin(): provision_step = _cluster_provision_step_get( context, session, step_id) if not provision_step: raise ex.NotFoundException( step_id, _("Cluster Provision Step id '%s' not found!")) event = m.ClusterEvent() values['step_id'] = step_id if not values['successful']: provision_step.update({'successful': False}) event.update(values) session.add(event) return event.id # Cluster verifications / health check ops def _cluster_verification_get(context, session, verification_id): # tenant id is not presented query = model_query(m.ClusterVerification, context, session, project_only=False) return query.filter_by(id=verification_id).first() def cluster_verification_get(context, verification_id): return _cluster_verification_get(context, get_session(), verification_id) def cluster_verification_add(context, cluster_id, values): session = get_session() with session.begin(): cluster = _cluster_get(context, session, cluster_id) if not cluster: raise ex.NotFoundException( cluster_id, _("Cluster id '%s' not found!")) verification = m.ClusterVerification() values['cluster_id'] = cluster_id verification.update(values) session.add(verification) return _cluster_verification_get(context, session, verification.id) def cluster_verification_update(context, verification_id, values): session = get_session() with session.begin(): verification = _cluster_verification_get( context, session, verification_id) if not verification: raise ex.NotFoundException( verification_id, _("Verification id '%s' not found!")) verification.update(values) return verification def cluster_verification_delete(context, verification_id): session = get_session() with session.begin(): verification = _cluster_verification_get( context, session, verification_id) if not verification: raise ex.NotFoundException( verification_id, _("Verification id '%s' not found!")) for check in verification.checks: session.delete(check) 
session.delete(verification) def _cluster_health_check_get(context, session, health_check_id): # tenant id is not presented query = model_query(m.ClusterHealthCheck, context, session, project_only=False) return query.filter_by(id=health_check_id).first() def cluster_health_check_get(context, health_check_id): return _cluster_health_check_get(context, get_session(), health_check_id) def cluster_health_check_add(context, verification_id, values): session = get_session() with session.begin(): verification = _cluster_verification_get( context, session, verification_id) if not verification: raise ex.NotFoundException( verification_id, _("Verification id '%s' not found!")) health_check = m.ClusterHealthCheck() values['verification_id'] = verification_id values['tenant_id'] = context.tenant_id health_check.update(values) session.add(health_check) return health_check def cluster_health_check_update(context, health_check_id, values): session = get_session() with session.begin(): health_check = _cluster_health_check_get( context, session, health_check_id) if not health_check: raise ex.NotFoundException( health_check_id, _("Health check id '%s' not found!")) health_check.update(values) return health_check def _plugin_get(context, session, name): query = model_query(m.PluginData, context, session) return query.filter_by(name=name).first() def plugin_get(context, name): session = get_session() with session.begin(): data = _plugin_get(context, session, name) return data def plugin_create(context, values): session = get_session() with session.begin(): plugin = m.PluginData() values['tenant_id'] = context.tenant_id plugin.update(values) session.add(plugin) return plugin def plugin_get_all(context): query = model_query(m.PluginData, context) return query.all() def plugin_update(context, name, values): session = get_session() with session.begin(): plugin = _plugin_get(context, session, name) if not plugin: raise ex.NotFoundException(name, _("Plugin name '%s' not found!")) plugin.update(values) return plugin def plugin_remove(context, name): session = get_session() with session.begin(): plugin = _plugin_get(context, session, name) if not plugin: raise ex.NotFoundException(name, _("Plugin name '%s' not found!")) session.delete(plugin) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/sqlalchemy/model_base.py0000664000175000017500000000333300000000000021514 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_db.sqlalchemy import models as oslo_models from sqlalchemy.ext import declarative from sqlalchemy.orm import attributes class _SaharaBase(oslo_models.ModelBase, oslo_models.TimestampMixin): """Base class for all SQLAlchemy DB Models.""" def to_dict(self): """sqlalchemy based automatic to_dict method.""" d = {} # if a column is unloaded at this point, it is # probably deferred. We do not want to access it # here and thereby cause it to load... 
unloaded = attributes.instance_state(self).unloaded for col in self.__table__.columns: if col.name not in unloaded: d[col.name] = getattr(self, col.name) datetime_to_str(d, 'created_at') datetime_to_str(d, 'updated_at') return d def datetime_to_str(dct, attr_name): if dct.get(attr_name) is not None: value = dct[attr_name].isoformat('T') ms_delimiter = value.find(".") if ms_delimiter != -1: # Removing ms from time value = value[:ms_delimiter] dct[attr_name] = value SaharaBase = declarative.declarative_base(cls=_SaharaBase) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/sqlalchemy/models.py0000664000175000017500000005064300000000000020713 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_utils import uuidutils import sqlalchemy as sa from sqlalchemy.orm import relationship from sahara.db.sqlalchemy import model_base as mb from sahara.db.sqlalchemy import types as st # Helpers def _generate_unicode_uuid(): return uuidutils.generate_uuid() def _id_column(): return sa.Column(sa.String(36), primary_key=True, default=_generate_unicode_uuid) # Main objects: Cluster, NodeGroup, Instance class Cluster(mb.SaharaBase): """Contains all info about cluster.""" __tablename__ = 'clusters' __table_args__ = ( sa.UniqueConstraint('name', 'tenant_id'), ) id = _id_column() name = sa.Column(sa.String(80), nullable=False) description = sa.Column(sa.Text) tenant_id = sa.Column(sa.String(36)) trust_id = sa.Column(sa.String(36)) is_transient = sa.Column(sa.Boolean, default=False) plugin_name = sa.Column(sa.String(80), nullable=False) hadoop_version = sa.Column(sa.String(80), nullable=False) cluster_configs = sa.Column(st.JsonDictType()) default_image_id = sa.Column(sa.String(36)) neutron_management_network = sa.Column(sa.String(36)) anti_affinity = sa.Column(st.JsonListType()) anti_affinity_ratio = sa.Column(sa.Integer, default=1) management_private_key = sa.Column(sa.Text, nullable=False) management_public_key = sa.Column(sa.Text, nullable=False) user_keypair_id = sa.Column(sa.String(80)) status = sa.Column(sa.String(80)) status_description = sa.Column(st.LongText()) info = sa.Column(st.JsonDictType()) extra = sa.Column(st.JsonDictType()) rollback_info = sa.Column(st.JsonDictType()) sahara_info = sa.Column(st.JsonDictType()) use_autoconfig = sa.Column(sa.Boolean(), default=True) provision_progress = relationship('ClusterProvisionStep', cascade="all,delete", backref='cluster', lazy='subquery') verification = relationship('ClusterVerification', cascade="all,delete", backref="cluster", lazy='joined') node_groups = relationship('NodeGroup', cascade="all,delete", backref='cluster', lazy='subquery') cluster_template_id = sa.Column(sa.String(36), sa.ForeignKey('cluster_templates.id')) cluster_template = relationship('ClusterTemplate', backref="clusters") shares = sa.Column(st.JsonListType()) is_public = sa.Column(sa.Boolean()) is_protected = sa.Column(sa.Boolean()) domain_name = 
sa.Column(sa.String(255)) def to_dict(self, show_progress=False): d = super(Cluster, self).to_dict() d['node_groups'] = [ng.to_dict() for ng in self.node_groups] d['provision_progress'] = [pp.to_dict(show_progress) for pp in self.provision_progress] if self.verification: d['verification'] = self.verification[0].to_dict() return d class NodeGroup(mb.SaharaBase): """Specifies group of nodes within a cluster.""" __tablename__ = 'node_groups' __table_args__ = ( sa.UniqueConstraint('name', 'cluster_id'), ) id = _id_column() name = sa.Column(sa.String(80), nullable=False) tenant_id = sa.Column(sa.String(36)) flavor_id = sa.Column(sa.String(36), nullable=False) image_id = sa.Column(sa.String(36)) image_username = sa.Column(sa.String(36)) node_processes = sa.Column(st.JsonListType()) node_configs = sa.Column(st.JsonDictType()) volumes_per_node = sa.Column(sa.Integer) volumes_size = sa.Column(sa.Integer) volumes_availability_zone = sa.Column(sa.String(255)) volume_mount_prefix = sa.Column(sa.String(80)) volume_type = sa.Column(sa.String(255)) boot_from_volume = sa.Column(sa.Boolean(), default=False, nullable=False) boot_volume_type = sa.Column(sa.String(255)) boot_volume_availability_zone = sa.Column(sa.String(255)) boot_volume_local_to_instance = sa.Column(sa.Boolean()) count = sa.Column(sa.Integer, nullable=False) use_autoconfig = sa.Column(sa.Boolean(), default=True) instances = relationship('Instance', cascade="all,delete", backref='node_group', order_by="Instance.instance_name", lazy='subquery') cluster_id = sa.Column(sa.String(36), sa.ForeignKey('clusters.id')) node_group_template_id = sa.Column(sa.String(36), sa.ForeignKey( 'node_group_templates.id')) node_group_template = relationship('NodeGroupTemplate', backref="node_groups") floating_ip_pool = sa.Column(sa.String(36)) security_groups = sa.Column(st.JsonListType()) auto_security_group = sa.Column(sa.Boolean()) availability_zone = sa.Column(sa.String(255)) open_ports = sa.Column(st.JsonListType()) is_proxy_gateway = sa.Column(sa.Boolean()) volume_local_to_instance = sa.Column(sa.Boolean()) shares = sa.Column(st.JsonListType()) def to_dict(self): d = super(NodeGroup, self).to_dict() d['instances'] = [i.to_dict() for i in self.instances] return d class Instance(mb.SaharaBase): """An OpenStack instance created for the cluster.""" __tablename__ = 'instances' __table_args__ = ( sa.UniqueConstraint('instance_id', 'node_group_id'), ) id = _id_column() tenant_id = sa.Column(sa.String(36)) node_group_id = sa.Column(sa.String(36), sa.ForeignKey('node_groups.id')) instance_id = sa.Column(sa.String(36)) instance_name = sa.Column(sa.String(80), nullable=False) internal_ip = sa.Column(sa.String(45)) management_ip = sa.Column(sa.String(45)) volumes = sa.Column(st.JsonListType()) storage_devices_number = sa.Column(sa.Integer) dns_hostname = sa.Column(sa.String(255)) # Template objects: ClusterTemplate, NodeGroupTemplate, TemplatesRelation class ClusterTemplate(mb.SaharaBase): """Template for Cluster.""" __tablename__ = 'cluster_templates' __table_args__ = ( sa.UniqueConstraint('name', 'tenant_id'), ) id = _id_column() name = sa.Column(sa.String(80), nullable=False) description = sa.Column(sa.Text) cluster_configs = sa.Column(st.JsonDictType()) default_image_id = sa.Column(sa.String(36)) anti_affinity = sa.Column(st.JsonListType()) tenant_id = sa.Column(sa.String(36)) neutron_management_network = sa.Column(sa.String(36)) plugin_name = sa.Column(sa.String(80), nullable=False) hadoop_version = sa.Column(sa.String(80), nullable=False) node_groups = 
relationship('TemplatesRelation', cascade="all,delete", backref='cluster_template', lazy='subquery') is_default = sa.Column(sa.Boolean(), default=False) use_autoconfig = sa.Column(sa.Boolean(), default=True) shares = sa.Column(st.JsonListType()) is_public = sa.Column(sa.Boolean()) is_protected = sa.Column(sa.Boolean()) domain_name = sa.Column(sa.String(255)) def to_dict(self): d = super(ClusterTemplate, self).to_dict() d['node_groups'] = [tr.to_dict() for tr in self.node_groups] return d class NodeGroupTemplate(mb.SaharaBase): """Template for NodeGroup.""" __tablename__ = 'node_group_templates' __table_args__ = ( sa.UniqueConstraint('name', 'tenant_id'), ) id = _id_column() name = sa.Column(sa.String(80), nullable=False) description = sa.Column(sa.Text) tenant_id = sa.Column(sa.String(36)) flavor_id = sa.Column(sa.String(36), nullable=False) image_id = sa.Column(sa.String(36)) plugin_name = sa.Column(sa.String(80), nullable=False) hadoop_version = sa.Column(sa.String(80), nullable=False) node_processes = sa.Column(st.JsonListType()) node_configs = sa.Column(st.JsonDictType()) volumes_per_node = sa.Column(sa.Integer, nullable=False) volumes_size = sa.Column(sa.Integer) volumes_availability_zone = sa.Column(sa.String(255)) volume_mount_prefix = sa.Column(sa.String(80)) volume_type = sa.Column(sa.String(255)) boot_from_volume = sa.Column(sa.Boolean(), default=False, nullable=False) boot_volume_type = sa.Column(sa.String(255)) boot_volume_availability_zone = sa.Column(sa.String(255)) boot_volume_local_to_instance = sa.Column(sa.Boolean()) floating_ip_pool = sa.Column(sa.String(36)) security_groups = sa.Column(st.JsonListType()) auto_security_group = sa.Column(sa.Boolean()) availability_zone = sa.Column(sa.String(255)) is_proxy_gateway = sa.Column(sa.Boolean()) volume_local_to_instance = sa.Column(sa.Boolean()) is_default = sa.Column(sa.Boolean(), default=False) use_autoconfig = sa.Column(sa.Boolean(), default=True) shares = sa.Column(st.JsonListType()) is_public = sa.Column(sa.Boolean()) is_protected = sa.Column(sa.Boolean()) class TemplatesRelation(mb.SaharaBase): """NodeGroupTemplate - ClusterTemplate relationship. In fact, it's a template of NodeGroup in Cluster. 
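    Unlike a NodeGroupTemplate, each row belongs to a single ClusterTemplate
    (via cluster_template_id) and also stores the node group count used by
    that cluster template.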
""" __tablename__ = 'templates_relations' id = _id_column() tenant_id = sa.Column(sa.String(36)) name = sa.Column(sa.String(80), nullable=False) flavor_id = sa.Column(sa.String(36), nullable=False) image_id = sa.Column(sa.String(36)) node_processes = sa.Column(st.JsonListType()) node_configs = sa.Column(st.JsonDictType()) volumes_per_node = sa.Column(sa.Integer) volumes_size = sa.Column(sa.Integer) volumes_availability_zone = sa.Column(sa.String(255)) volume_mount_prefix = sa.Column(sa.String(80)) volume_type = sa.Column(sa.String(255)) boot_from_volume = sa.Column(sa.Boolean(), default=False, nullable=False) boot_volume_type = sa.Column(sa.String(255)) boot_volume_availability_zone = sa.Column(sa.String(255)) boot_volume_local_to_instance = sa.Column(sa.Boolean()) count = sa.Column(sa.Integer, nullable=False) use_autoconfig = sa.Column(sa.Boolean(), default=True) cluster_template_id = sa.Column(sa.String(36), sa.ForeignKey('cluster_templates.id')) node_group_template_id = sa.Column(sa.String(36), sa.ForeignKey( 'node_group_templates.id')) node_group_template = relationship('NodeGroupTemplate', backref="templates_relations") floating_ip_pool = sa.Column(sa.String(36)) security_groups = sa.Column(st.JsonListType()) auto_security_group = sa.Column(sa.Boolean()) availability_zone = sa.Column(sa.String(255)) is_proxy_gateway = sa.Column(sa.Boolean()) volume_local_to_instance = sa.Column(sa.Boolean()) shares = sa.Column(st.JsonListType()) # EDP objects: DataSource, Job, Job Execution, JobBinary class DataSource(mb.SaharaBase): """DataSource - represent a diffident types of data sources. e.g. Swift, Cassandra etc. """ __tablename__ = 'data_sources' __table_args__ = ( sa.UniqueConstraint('name', 'tenant_id'), ) id = _id_column() tenant_id = sa.Column(sa.String(36)) name = sa.Column(sa.String(80), nullable=False) description = sa.Column(sa.Text()) type = sa.Column(sa.String(80), nullable=False) url = sa.Column(sa.String(256), nullable=False) credentials = sa.Column(st.JsonDictType()) is_public = sa.Column(sa.Boolean()) is_protected = sa.Column(sa.Boolean()) class JobExecution(mb.SaharaBase): """JobExecution - represent a job execution of specific cluster.""" __tablename__ = 'job_executions' id = _id_column() tenant_id = sa.Column(sa.String(36)) job_id = sa.Column(sa.String(36), sa.ForeignKey('jobs.id')) input_id = sa.Column(sa.String(36), sa.ForeignKey('data_sources.id')) output_id = sa.Column(sa.String(36), sa.ForeignKey('data_sources.id')) start_time = sa.Column(sa.DateTime()) end_time = sa.Column(sa.DateTime()) cluster_id = sa.Column(sa.String(36), sa.ForeignKey('clusters.id')) info = sa.Column(st.JsonDictType()) engine_job_id = sa.Column(sa.String(100)) return_code = sa.Column(sa.String(80)) job_configs = sa.Column(st.JsonDictType()) extra = sa.Column(st.JsonDictType()) data_source_urls = sa.Column(st.JsonDictType()) is_public = sa.Column(sa.Boolean()) is_protected = sa.Column(sa.Boolean()) def to_dict(self): d = super(JobExecution, self).to_dict() # The oozie_job_id filed is renamed to engine_job_id # to make this field more universal. But, we need to # carry both engine_job_id and oozie_job_id until we # can deprecate "oozie_job_id". 
d['oozie_job_id'] = self.engine_job_id return d mains_association = sa.Table("mains_association", mb.SaharaBase.metadata, sa.Column("Job_id", sa.String(36), sa.ForeignKey("jobs.id")), sa.Column("JobBinary_id", sa.String(36), sa.ForeignKey("job_binaries.id")) ) libs_association = sa.Table("libs_association", mb.SaharaBase.metadata, sa.Column("Job_id", sa.String(36), sa.ForeignKey("jobs.id")), sa.Column("JobBinary_id", sa.String(36), sa.ForeignKey("job_binaries.id")) ) class Job(mb.SaharaBase): """Job - description and location of a job binary.""" __tablename__ = 'jobs' __table_args__ = ( sa.UniqueConstraint('name', 'tenant_id'), ) id = _id_column() tenant_id = sa.Column(sa.String(36)) name = sa.Column(sa.String(80), nullable=False) description = sa.Column(sa.Text()) type = sa.Column(sa.String(80), nullable=False) is_public = sa.Column(sa.Boolean()) is_protected = sa.Column(sa.Boolean()) mains = relationship("JobBinary", secondary=mains_association, lazy="subquery") libs = relationship("JobBinary", secondary=libs_association, lazy="subquery") interface = relationship('JobInterfaceArgument', cascade="all,delete", order_by="JobInterfaceArgument.order", backref='job', lazy='subquery') def to_dict(self): d = super(Job, self).to_dict() d['mains'] = [jb.to_dict() for jb in self.mains] d['libs'] = [jb.to_dict() for jb in self.libs] d['interface'] = [arg.to_dict() for arg in self.interface] return d class JobInterfaceArgument(mb.SaharaBase): """JobInterfaceArgument - Configuration setting for a specific job.""" __tablename__ = 'job_interface_arguments' __table_args__ = ( sa.UniqueConstraint('job_id', 'name'), sa.UniqueConstraint('job_id', 'order') ) id = _id_column() job_id = sa.Column(sa.String(36), sa.ForeignKey('jobs.id'), nullable=False) tenant_id = sa.Column(sa.String(36)) name = sa.Column(sa.String(80), nullable=False) description = sa.Column(sa.Text()) mapping_type = sa.Column(sa.String(80), nullable=False) location = sa.Column(sa.Text(), nullable=False) value_type = sa.Column(sa.String(80), nullable=False) required = sa.Column(sa.Boolean(), nullable=False) order = sa.Column(sa.SmallInteger(), nullable=False) default = sa.Column(sa.Text()) class JobBinaryInternal(mb.SaharaBase): """JobBinaryInternal - raw binary storage for executable jobs.""" __tablename__ = 'job_binary_internal' __table_args__ = ( sa.UniqueConstraint('name', 'tenant_id'), ) id = _id_column() tenant_id = sa.Column(sa.String(36)) name = sa.Column(sa.String(80), nullable=False) data = sa.orm.deferred(sa.Column(st.LargeBinary())) datasize = sa.Column(sa.BIGINT) is_public = sa.Column(sa.Boolean()) is_protected = sa.Column(sa.Boolean()) class JobBinary(mb.SaharaBase): """JobBinary - raw binary storage for executable jobs.""" __tablename__ = 'job_binaries' __table_args__ = ( sa.UniqueConstraint('name', 'tenant_id'), ) id = _id_column() tenant_id = sa.Column(sa.String(36)) name = sa.Column(sa.String(80), nullable=False) description = sa.Column(sa.Text()) url = sa.Column(sa.String(256), nullable=False) extra = sa.Column(st.JsonDictType()) is_public = sa.Column(sa.Boolean()) is_protected = sa.Column(sa.Boolean()) class ClusterEvent(mb.SaharaBase): """"Event - represent a info about current provision step.""" __tablename__ = 'cluster_events' __table_args__ = ( sa.UniqueConstraint('id', 'step_id'), ) id = _id_column() node_group_id = sa.Column(sa.String(36)) instance_id = sa.Column(sa.String(36)) instance_name = sa.Column(sa.String(80)) event_info = sa.Column(sa.Text) successful = sa.Column(sa.Boolean, nullable=False) step_id = 
sa.Column(sa.String(36), sa.ForeignKey( 'cluster_provision_steps.id')) class ClusterProvisionStep(mb.SaharaBase): """ProvisionStep - represent a current provision step of cluster.""" __tablename__ = 'cluster_provision_steps' __table_args__ = ( sa.UniqueConstraint('id', 'cluster_id'), ) id = _id_column() cluster_id = sa.Column(sa.String(36), sa.ForeignKey('clusters.id')) tenant_id = sa.Column(sa.String(36)) step_name = sa.Column(sa.String(80)) step_type = sa.Column(sa.String(36)) total = sa.Column(sa.Integer) successful = sa.Column(sa.Boolean, nullable=True) events = relationship('ClusterEvent', cascade="all,delete", backref='ClusterProvisionStep', lazy='subquery') def to_dict(self, show_progress): d = super(ClusterProvisionStep, self).to_dict() if show_progress: d['events'] = [event.to_dict() for event in self.events] return d class ClusterVerification(mb.SaharaBase): """ClusterVerification represent results of cluster health checks.""" __tablename__ = 'cluster_verifications' __table_args__ = (sa.UniqueConstraint('id', 'cluster_id'),) id = _id_column() cluster_id = sa.Column( sa.String(36), sa.ForeignKey('clusters.id')) status = sa.Column(sa.String(15)) checks = relationship( 'ClusterHealthCheck', cascade="all,delete", backref='ClusterVerification', lazy='subquery') def to_dict(self): base = super(ClusterVerification, self).to_dict() base['checks'] = [check.to_dict() for check in self.checks] return base class ClusterHealthCheck(mb.SaharaBase): """ClusterHealthCheck respresent cluster health check.""" __tablename__ = 'cluster_health_checks' __table_args__ = (sa.UniqueConstraint('id', 'verification_id'),) id = _id_column() verification_id = sa.Column( sa.String(36), sa.ForeignKey('cluster_verifications.id')) status = sa.Column(sa.String(15)) description = sa.Column(sa.Text) name = sa.Column(sa.String(80)) class PluginData(mb.SaharaBase): """Plugin Data represents Provisioning Plugin.""" __tablename__ = 'plugin_data' __table_args__ = ( sa.UniqueConstraint('name', 'tenant_id'), ) id = _id_column() tenant_id = sa.Column(sa.String(36), nullable=False) name = sa.Column(sa.String(15), nullable=False) plugin_labels = sa.Column(st.JsonDictType()) version_labels = sa.Column(st.JsonDictType()) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/sqlalchemy/types.py0000664000175000017500000000672300000000000020574 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
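# Note: the column helpers defined below back the JSON columns used in
# models.py; a minimal illustration (mirroring the model definitions):
#
#     cluster_configs = sa.Column(st.JsonDictType())
#     anti_affinity = sa.Column(st.JsonListType())
#
# Values stored in such columns are JSON-encoded on write and decoded on
# read, and in-place dict/list mutations are tracked so SQLAlchemy knows
# to persist them.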
from oslo_serialization import jsonutils import sqlalchemy as sa from sqlalchemy.dialects import mysql from sqlalchemy.ext import mutable class JsonEncoded(sa.TypeDecorator): """Represents an immutable structure as a json-encoded string.""" impl = sa.Text def process_bind_param(self, value, dialect): if value is not None: value = jsonutils.dumps(value) return value def process_result_value(self, value, dialect): if value is not None: value = jsonutils.loads(value) return value class MutableDict(mutable.Mutable, dict): @classmethod def coerce(cls, key, value): """Convert plain dictionaries to MutableDict.""" if not isinstance(value, MutableDict): if isinstance(value, dict): return MutableDict(value) # this call will raise ValueError return mutable.Mutable.coerce(key, value) else: return value def update(self, e=None, **f): """Detect dictionary update events and emit change events.""" dict.update(self, e, **f) self.changed() def __setitem__(self, key, value): """Detect dictionary set events and emit change events.""" dict.__setitem__(self, key, value) self.changed() def __delitem__(self, key): """Detect dictionary del events and emit change events.""" dict.__delitem__(self, key) self.changed() class MutableList(mutable.Mutable, list): @classmethod def coerce(cls, key, value): """Convert plain lists to MutableList.""" if not isinstance(value, MutableList): if isinstance(value, list): return MutableList(value) # this call will raise ValueError return mutable.Mutable.coerce(key, value) else: return value def append(self, value): """Detect list add events and emit change events.""" list.append(self, value) self.changed() def remove(self, value): """Removes an item by value and emit change events.""" list.remove(self, value) self.changed() def __setitem__(self, key, value): """Detect list set events and emit change events.""" list.__setitem__(self, key, value) self.changed() def __delitem__(self, i): """Detect list del events and emit change events.""" list.__delitem__(self, i) self.changed() def JsonDictType(): """Returns an SQLAlchemy Column Type suitable to store a Json dict.""" return MutableDict.as_mutable(JsonEncoded) def JsonListType(): """Returns an SQLAlchemy Column Type suitable to store a Json array.""" return MutableList.as_mutable(JsonEncoded) def LargeBinary(): return sa.LargeBinary().with_variant(mysql.LONGBLOB(), 'mysql') def LongText(): return sa.Text().with_variant(mysql.LONGTEXT(), 'mysql') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641486.7058911 sahara-16.0.0/sahara/db/templates/0000775000175000017500000000000000000000000016702 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/templates/README.rst0000664000175000017500000002300200000000000020366 0ustar00zuulzuul00000000000000Sahara Default Template CLI =========================== The *sahara-templates* application is a simple CLI for managing default templates in Sahara. This document gives an overview of default templates and explains how to use the CLI. Default Templates Overview -------------------------- The goal of the default template facility in Sahara is to make cluster launching quick and easy by providing users with a stable set of pre-generated node group and cluster templates for each of the Sahara provisioning plugins. Template sets are defined in .json files grouped into directories. 
The CLI reads these template sets and writes directly to the Sahara database. Default templates may only be created, modified, or deleted via the CLI -- operations through the python-saharaclient or REST API are restricted. JSON Files ---------- Cluster and node group templates are defined in .json files. A very simple cluster template JSON file might look like this: .. code:: python { "plugin_name": "vanilla", "hadoop_version": "2.7.1", "node_groups": [ { "name": "master", "count": 1, "node_group_template_id": "{master}" }, { "name": "worker", "count": 3, "node_group_template_id": "{worker}" } ], "name": "cluster-template" } The values of the *node_group_template_id* fields are the names of node group templates in set braces. In this example, *master* and *worker* are the names of node group templates defined in .json files in the same directory. When the CLI processes the directory, it will create the node group templates first and then substitute the appropriate id values for the name references when it creates the cluster template. Configuration Files and Value Substitutions ------------------------------------------- The CLI supports value substitution for a limited set of fields. For cluster templates, the following fields may use substitution: * default_image_id * neutron_management_network For node group templates, the following fields may use substitution: * image_id * flavor_id * floating_ip_pool Substitution is indicated for one of these fields in a .json file when the value is the name of the field in set braces. Here is an example of a node group template file that uses substitution for *flavor_id*: .. code:: python { "plugin_name": "vanilla", "hadoop_version": "2.7.1", "node_processes": [ "namenode", "resourcemanager", "oozie", "historyserver" ], "name": "master", "flavor_id": "{flavor_id}", "floating_ip_pool": "{floating_ip_pool}" } The values for *flavor_id* and *floating_ip_pool* in this template will come from a configuration file. If a configuration value is found for the substitution, the value will be replaced. If a configuration value is not found, the field will be omitted from the template. (In this example, *flavor_id* is a required field of node group templates and the template will fail validation if there is no substitution value specified. However, *floating_ip_pool* is not required and so the template will still pass validation if it is omitted). The CLI will look for configuration sections with names based on the *plugin_name*, *hadoop_version*, and *name* fields in the template. It will look for sections in the following order: * **[]** May contain fields only for the type of the named template If templates are named in an **unambiguous** way, the template name alone can be a used as the name of the config section. This produces shorter names and aids readability when there is a one-to-one mapping between template names and config sections. * **[__]** May contain fields only for the type of the named template This form unambiguously applies to a specific template for a specific plugin. * **[_]** May contain node group or cluster template fields * **[]** May contain node group or cluster template fields * **[DEFAULT]** May contain node group or cluster template fields If we have the following configuration file in our example the CLI will find the value of *flavor_id* for the *master* template in the first configuration section and the value for *floating_ip_pool* in the third section: .. 
code:: python [vanilla_2.7.1_master] # This is named for the plugin, version, and template. # It may contain only node group template fields. flavor_id = 5 image_id = b7883f8a-9a7f-42cc-89a2-d3c8b1cc7b28 [vanilla_2.7.1] # This is named for the plugin and version. # It may contain fields for both node group and cluster templates. flavor_id = 4 neutron_management_network = 9973da0b-68eb-497d-bd48-d85aca37f088 [vanilla] # This is named for the plugin. # It may contain fields for both node group and cluster templates. flavor_id = 3 default_image_id = 89de8d21-9743-4d20-873e-7677973416dd floating_ip_pool = my_pool [DEFAULT] # This is the normal default section. # It may contain fields for both node group and cluster templates. flavor_id = 2 Sample Configuration File ------------------------- A sample configuration file is provided in *sahara/plugins/default_templates/template.conf*. This file sets the *flavor_id* for most of the node group templates supplied with Sahara to 2 which indicates the *m1.small* flavor in a default OpenStack deployment. The master node templates for the CDH plugin have the *flavor_id* set to 4 which indicates the *m1.large* flavor, since these nodes require more resources. This configuration file may be used with the CLI as is, or it may be copied and modified. Note that multiple configuration files may be passed to the CLI by repeating the *--config-file* option. Other Special Configuration Parameters -------------------------------------- The only configuration parameter that is strictly required is the *connection* parameter in the *database* section. Without this value the CLI will not be able to connect to the Sahara database. By default, the CLI will use the value of the *plugins* parameter in the [DEFAULT] section on *update* to filter the templates that will be created or updated. This parameter in Sahara defaults to the set of fully supported plugins. To restrict the set of plugins for the *update* operation set this parameter or use the *--plugin-name* option. Directory Structure ------------------- The structure of the directory holding .json files for the CLI is very flexible. The CLI will begin processing at the designated starting directory and recurse through subdirectories. At each directory level, the CLI will look for .json files to define a set of default templates. Cluster templates may reference node group templates in the same set by name. Templates at different levels in the directory structure are not in the same set. Plugin name and version are determined from the values in the .json files, not by the file names or the directory structure. Recursion may be turned off with the "-n" option (see below). The default starting directory is *sahara/plugins/default_templates* Example CLI Commands -------------------- For ``update``, ``delete``, ``node-group-template-delete``, and ``cluster-template-delete`` operations, the tenant must always be specified. For ``node-group-template-delete-id`` and ``cluster-template-delete-id`` tenant is not required. All useful information about activity by the CLI is logged Create/update all of the default templates bundled with Sahara. 
Use the standard Sahara configuration file in */etc/sahara/sahara.conf* to specify the plugin list and the database connection string and another configuration file to supply the *flavor_id* values:: $ sahara-templates --config-file /etc/sahara/sahara.conf --config-file myconfig update -t $TENANT_ID Create/update default templates from the directory *mypath*:: $ sahara-templates --config-file myconfig update -t $TENANT_ID -d mypath Create/update default templates from the directory *mypath* but do not descend into subdirectories:: $ sahara-templates --config-file myconfig update -t $TENANT_ID -d mypath -n Create/update default templates bundled with Sahara for just the vanilla plugin:: $ sahara-templates --config-file myconfig update -t $TENANT_ID -p vanilla Create/update default templates bundled with Sahara for just version 2.7.1 of the vanilla plugin:: $ sahara-templates --config-file myconfig update -t $TENANT_ID -p vanilla -pv 2.7.1 Create/update default templates bundled with Sahara for just version 2.7.1 of the vanilla plugin and version 2.0.6 of the hdp plugin:: $ sahara-templates --config-file myconfig update -t $TENANT_ID -p vanilla -pv vanilla.2.7.1 -p hdp -pv hdp.2.0.6 Delete default templates for the vanilla plugin:: $ sahara-templates --config-file myconfig delete -t $TENANT_ID -p vanilla Delete default templates for version 2.7.1 of the vanilla plugin:: $ sahara-templates --config-file myconfig delete -t $TENANT_ID -p vanilla -pv 2.7.1 Delete a specific node group template by ID:: $ sahara-templates --config-file myconfig node-group-template-delete-id --id ID Delete a specific cluster template by ID:: $ sahara-templates --config-file myconfig cluster-template-delete-id --id ID Delete a specific node group template by name:: $ sahara-templates --config-file myconfig node-group-template-delete --name NAME -t $TENANT_ID Delete a specific cluster template by name:: $ sahara-templates --config-file myconfig cluster-template-delete --name NAME -t $TENANT_ID ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/templates/__init__.py0000664000175000017500000000000000000000000021001 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/templates/api.py0000664000175000017500000007552200000000000020040 0ustar00zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
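# This module backs the sahara-templates CLI described in
# sahara/db/templates/README.rst.  Rough sketch of the substitution flow
# (the section name and value below are illustrative, taken from the
# README example):
#
#     [vanilla_2.7.1_master]
#     flavor_id = 5
#
# A config section that matches a template is registered with the option
# lists defined below (node_group_template_opts / cluster_template_opts),
# so values such as flavor_id can then be read from CONF and substituted
# for the corresponding "{flavor_id}" placeholders in the template .json
# files.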
import copy import os import jsonschema from oslo_config import cfg from oslo_serialization import jsonutils as json from oslo_utils import uuidutils import six from sahara import conductor from sahara.db.templates import utils as u from sahara.service.validations import cluster_template_schema as clt from sahara.service.validations import node_group_template_schema as ngt from sahara.utils import api_validator LOG = None CONF = None # This is broken out to support testability def set_logger(log): global LOG LOG = log # This is broken out to support testability def set_conf(conf): global CONF CONF = conf ng_validator = api_validator.ApiValidator(ngt.NODE_GROUP_TEMPLATE_SCHEMA) ct_validator = api_validator.ApiValidator(clt.CLUSTER_TEMPLATE_SCHEMA) # Options that we allow to be replaced in a node group template node_group_template_opts = [ cfg.StrOpt('image_id', help='Image id field for a node group template.'), cfg.StrOpt('flavor_id', help='Flavor id field for a node group template.'), cfg.StrOpt('floating_ip_pool', help='Floating ip pool field for a node group template.'), cfg.BoolOpt('auto_security_group', default=False, help='Auto security group field for node group template.'), cfg.ListOpt('security_groups', default=[], help='Security group field for node group template.') ] # Options that we allow to be replaced in a cluster template cluster_template_opts = [ cfg.StrOpt('default_image_id', help='Default image id field for a cluster template.'), cfg.StrOpt('neutron_management_network', help='Neutron management network ' 'field for a cluster template.')] all_template_opts = node_group_template_opts + cluster_template_opts node_group_template_opt_names = [o.name for o in node_group_template_opts] cluster_template_opt_names = [o.name for o in cluster_template_opts] # This is a local exception class that is used to exit routines # in cases where error information has already been logged. # It is caught and suppressed everywhere it is used. class Handled(Exception): pass class Context(object): '''Create a pseudo Context object Since this tool does not use the REST interface, we do not have a request from which to build a Context. ''' def __init__(self, is_admin=False, tenant_id=None): self.is_admin = is_admin self.tenant_id = tenant_id def check_usage_of_existing(ctx, ng_templates, cl_templates): '''Determine if any of the specified templates are in use This method searches for the specified templates by name and determines whether or not any existing templates are in use by a cluster or cluster template. Returns True if any of the templates are in use. :param ng_templates: A list of dictionaries. Each dictionary has a "template" entry that represents a node group template. :param cl_templates: A list of dictionaries. 
Each dictionary has a "template" entry that represents a cluster template :returns: True if any of the templates are in use, False otherwise ''' error = False clusters = conductor.API.cluster_get_all(ctx) for ng_info in ng_templates: ng = u.find_node_group_template_by_name(ctx, ng_info["template"]["name"]) if ng: cluster_users, template_users = u.check_node_group_template_usage( ng["id"], clusters) if cluster_users: LOG.warning("Node group template {name} " "in use by clusters {clusters}".format( name=ng["name"], clusters=cluster_users)) if template_users: LOG.warning("Node group template {name} " "in use by cluster templates {templates}".format( name=ng["name"], templates=template_users)) if cluster_users or template_users: LOG.warning("Update of node group template " "{name} is not allowed".format(name=ng["name"])) error = True for cl_info in cl_templates: cl = u.find_cluster_template_by_name(ctx, cl_info["template"]["name"]) if cl: cluster_users = u.check_cluster_template_usage(cl["id"], clusters) if cluster_users: LOG.warning("Cluster template {name} " "in use by clusters {clusters}".format( name=cl["name"], clusters=cluster_users)) LOG.warning("Update of cluster template " "{name} is not allowed".format(name=cl["name"])) error = True return error def log_skipping_dir(path, reason=""): if reason: reason = ", " + reason LOG.warning("Skipping processing for {dir}{reason}".format( dir=path, reason=reason)) def check_cluster_templates_valid(ng_templates, cl_templates): # Check that if name references to node group templates # are replaced with a uuid value that the cluster template # passes JSON validation. We don't have the real uuid yet, # but this will allow the validation test. if ng_templates: dummy_uuid = uuidutils.generate_uuid() ng_ids = {ng["template"]["name"]: dummy_uuid for ng in ng_templates} else: ng_ids = {} for cl in cl_templates: template = copy.deepcopy(cl["template"]) u.substitute_ng_ids(template, ng_ids) try: ct_validator.validate(template) except jsonschema.ValidationError as e: LOG.warning("Validation for {path} failed, {reason}".format( path=cl["path"], reason=e)) return True return False def add_config_section(section_name, options): if section_name and hasattr(CONF, section_name): # It's already been added return if section_name: group = cfg.OptGroup(name=section_name) CONF.register_group(group) CONF.register_opts(options, group) else: # Add options to the default section CONF.register_opts(options) def add_config_section_for_template(template): '''Register a config section based on the template values Check to see if the configuration files contain a section that corresponds to the template. If an appropriate section can be found, register options for the template so that the config values can be read and applied to the template via substitution (oslo supports registering groups and options at any time, before or after the config files are parsed). Corresponding section names may be of the following forms: , example "hdp-2.0.6-master" This is useful when a template naming convention is being used, so that the template name is already unambiguous __, example "hdp_2.0.6_master" This can be used if there is a name collision between templates _, example "hdp_2.0.6" , example "hdp" DEFAULT Sections are tried in the order given above. Since the first two section naming forms refer to a specific template by name, options are added based on template type. 
However, the other section naming forms may map to node group templates or cluster templates, so options for both are added. ''' sections = list(CONF.list_all_sections()) unique_name = "{name}".format(**template) fullname = "{plugin_name}_{hadoop_version}_{name}".format(**template) plugin_version = "{plugin_name}_{hadoop_version}".format(**template) plugin = "{plugin_name}".format(**template) section_name = None if unique_name in sections: section_name = unique_name elif fullname in sections: section_name = fullname if section_name: if u.is_node_group(template): opts = node_group_template_opts else: opts = cluster_template_opts else: if plugin_version in sections: section_name = plugin_version elif plugin in sections: section_name = plugin opts = all_template_opts add_config_section(section_name, opts) return section_name def substitute_config_values(configs, template, path): if u.is_node_group(template): opt_names = node_group_template_opt_names else: opt_names = cluster_template_opt_names for opt, value in six.iteritems(configs): if opt in opt_names and opt in template: template[opt] = value def get_configs(section): if section is None: return dict(CONF) return dict(CONF[section]) def get_plugin_name(): if CONF.command.name == "update" and ( not CONF.command.plugin_name and ( hasattr(CONF, "plugins") and CONF.plugins)): return CONF.plugins return CONF.command.plugin_name def process_files(dirname, files): node_groups = [] clusters = [] plugin_name = get_plugin_name() try: for fname in files: if os.path.splitext(fname)[1] == ".json": fpath = os.path.join(dirname, fname) with open(fpath, 'r') as fp: try: data = fp.read() template = json.loads(data) except ValueError as e: LOG.warning("Error processing {path}, " "{reason}".format(path=fpath, reason=e)) raise Handled("error processing files") # If this file doesn't contain basic fields, skip it. 
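# ("Basic fields" here means the required 'name', 'plugin_name' and
# 'hadoop_version' keys; see utils.check_basic_fields().)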
# If we are filtering on plugin and version make # sure the file is one that we want if not u.check_basic_fields(template) or ( not u.check_plugin_name_and_version( template, plugin_name, CONF.command.plugin_version)): continue # Look through the sections in CONF and register # options for this template if we find a section # related to the template (ie, plugin, version, name) section = add_config_section_for_template(template) LOG.debug("Using config section {section} " "for {path}".format(section=section, path=fpath)) # Attempt to resolve substitutions using the config section substitute_config_values(get_configs(section), template, fpath) file_entry = {'template': template, 'path': fpath} if u.is_node_group(template): # JSON validator try: ng_validator.validate(template) except jsonschema.ValidationError as e: LOG.warning("Validation for {path} failed, " "{reason}".format(path=fpath, reason=e)) raise Handled( "node group template validation failed") node_groups.append(file_entry) LOG.debug("Added {path} to node group " "template files".format(path=fpath)) else: clusters.append(file_entry) LOG.debug("Added {path} to cluster template " "files".format(path=fpath)) except Handled as e: log_skipping_dir(dirname, str(e)) node_groups = [] clusters = [] except Exception as e: log_skipping_dir(dirname, "unhandled exception, {reason}".format(reason=e)) node_groups = [] clusters = [] return node_groups, clusters def delete_node_group_template(ctx, template, rollback=False): rollback_msg = " on rollback" if rollback else "" # If we are not deleting something that we just created, # do usage checks to ensure that the template is not in # use by a cluster or a cluster template if not rollback: clusters = conductor.API.cluster_get_all(ctx) cluster_templates = conductor.API.cluster_template_get_all(ctx) cluster_users, template_users = u.check_node_group_template_usage( template["id"], clusters, cluster_templates) if cluster_users: LOG.warning("Node group template {info} " "in use by clusters {clusters}".format( info=u.name_and_id(template), clusters=cluster_users)) if template_users: LOG.warning("Node group template {info} " "in use by cluster templates {templates}".format( info=u.name_and_id(template), templates=template_users)) if cluster_users or template_users: LOG.warning("Deletion of node group template " "{info} failed".format(info=u.name_and_id(template))) return try: conductor.API.node_group_template_destroy(ctx, template["id"], ignore_prot_on_def=True) except Exception as e: LOG.warning("Deletion of node group template {info} " "failed{rollback}, {reason}".format( info=u.name_and_id(template), reason=e, rollback=rollback_msg)) else: LOG.info("Deleted node group template {info}{rollback}".format( info=u.name_and_id(template), rollback=rollback_msg)) def reverse_node_group_template_creates(ctx, templates): for template in templates: delete_node_group_template(ctx, template, rollback=True) def reverse_node_group_template_updates(ctx, update_info): for template, values in update_info: # values are the original values that we overwrote in the update try: conductor.API.node_group_template_update(ctx, template["id"], values, ignore_prot_on_def=True) except Exception as e: LOG.warning("Rollback of update for node group " "template {info} failed, {reason}".format( info=u.name_and_id(template), reason=e)) else: LOG.info("Rolled back update for node group " "template {info}".format(info=u.name_and_id(template))) def add_node_group_templates(ctx, node_groups): error = False ng_info = {"ids": {}, 
"created": [], "updated": []} def do_reversals(ng_info): reverse_node_group_template_updates(ctx, ng_info["updated"]) reverse_node_group_template_creates(ctx, ng_info["created"]) return {}, True try: for ng in node_groups: template = ng['template'] current = u.find_node_group_template_by_name(ctx, template['name']) if current: # Track what we see in the current template that is different # from our update values. Save it for possible rollback. # Note, this is not perfect because it does not recurse through # nested structures to get an exact diff, but it ensures that # we track only fields that are valid in the JSON schema updated_fields = u.value_diff(current.to_dict(), template) # Always attempt to update. Since the template value is a # combination of JSON and config values, there is no useful # timestamp we can use to skip an update. # If sqlalchemy determines no change in fields, it will not # mark it as updated. try: template = conductor.API.node_group_template_update( ctx, current['id'], template, ignore_prot_on_def=True) except Exception as e: LOG.warning("Update of node group template {info} " "failed, {reason}".format( info=u.name_and_id(current), reason=e)) raise Handled() if template['updated_at'] != current['updated_at']: ng_info["updated"].append((template, updated_fields)) LOG.info("Updated node group template {info} " "from {path}".format(info=u.name_and_id(template), path=ng["path"])) else: LOG.debug("No change to node group template {info} " "from {path}".format( info=u.name_and_id(current), path=ng['path'])) else: template['is_default'] = True try: template = conductor.API.node_group_template_create( ctx, template) except Exception as e: LOG.warning("Creation of node group template " "from {path} failed, {reason}".format( path=ng['path'], reason=e)) raise Handled() ng_info["created"].append(template) LOG.info("Created node group template {info} " "from {path}".format(info=u.name_and_id(template), path=ng["path"])) # For the purposes of substitution we need a dict of id by name ng_info["ids"][template['name']] = template['id'] except Handled: ng_info, error = do_reversals(ng_info) except Exception as e: LOG.warning("Unhandled exception while processing " "node group templates, {reason}".format(reason=e)) ng_info, error = do_reversals(ng_info) return ng_info, error def delete_cluster_template(ctx, template, rollback=False): rollback_msg = " on rollback" if rollback else "" # If we are not deleting something that we just created, # do usage checks to ensure that the template is not in # use by a cluster if not rollback: clusters = conductor.API.cluster_get_all(ctx) cluster_users = u.check_cluster_template_usage(template["id"], clusters) if cluster_users: LOG.warning("Cluster template {info} " "in use by clusters {clusters}".format( info=u.name_and_id(template), clusters=cluster_users)) LOG.warning("Deletion of cluster template " "{info} failed".format(info=u.name_and_id(template))) return try: conductor.API.cluster_template_destroy(ctx, template["id"], ignore_prot_on_def=True) except Exception as e: LOG.warning("Deletion of cluster template {info} failed{rollback}" ", {reason}".format(info=u.name_and_id(template), reason=e, rollback=rollback_msg)) else: LOG.info("Deleted cluster template {info}{rollback}".format( info=u.name_and_id(template), rollback=rollback_msg)) def reverse_cluster_template_creates(ctx, templates): for template in templates: delete_cluster_template(ctx, template, rollback=True) def reverse_cluster_template_updates(ctx, update_info): for template, values 
in update_info: # values are the original values that we overwrote in the update try: conductor.API.cluster_template_update(ctx, template["id"], values, ignore_prot_on_def=True) except Exception as e: LOG.warning("Rollback of update for cluster " "template {info} failed, {reason}".format( info=u.name_and_id(template), reason=e)) else: LOG.info("Rolled back update for cluster " "template {info}".format(info=u.name_and_id(template))) def add_cluster_templates(ctx, clusters, ng_dict): '''Add cluster templates to the database. The value of any node_group_template_id fields in cluster templates which reference a node group template in ng_dict by name will be changed to the id of the node group template. If there is an error in creating or updating a template, any templates that have already been created will be delete and any updates will be reversed. :param clusters: a list of dictionaries. Each dictionary has a "template" entry holding the cluster template and a "path" entry holding the path of the file from which the template was read. :param ng_dict: a dictionary of node group template ids keyed by node group template names ''' error = False created = [] updated = [] def do_reversals(created, updated): reverse_cluster_template_updates(ctx, updated) reverse_cluster_template_creates(ctx, created) return True try: for cl in clusters: template = cl['template'] # Fix up node_group_template_id fields u.substitute_ng_ids(template, ng_dict) # Name + tenant_id is unique, so search by name current = u.find_cluster_template_by_name(ctx, template['name']) if current: # Track what we see in the current template that is different # from our update values. Save it for possible rollback. # Note, this is not perfect because it does not recurse through # nested structures to get an exact diff, but it ensures that # we track only fields that are valid in the JSON schema updated_fields = u.value_diff(current.to_dict(), template) # Always attempt to update. Since the template value is a # combination of JSON and config values, there is no useful # timestamp we can use to skip an update. # If sqlalchemy determines no change in fields, it will not # mark it as updated. # TODO(tmckay): why when I change the count in an # entry in node_groups does it not count as an update? 
# Probably a bug try: template = conductor.API.cluster_template_update( ctx, current['id'], template, ignore_prot_on_def=True) except Exception as e: LOG.warning("Update of cluster template {info} " "failed, {reason}".format( info=u.name_and_id(current), reason=e)) raise Handled() if template['updated_at'] != current['updated_at']: updated.append((template, updated_fields)) LOG.info("Updated cluster template {info} " "from {path}".format(info=u.name_and_id(template), path=cl['path'])) else: LOG.debug("No change to cluster template {info} " "from {path}".format(info=u.name_and_id(current), path=cl["path"])) else: template["is_default"] = True try: template = conductor.API.cluster_template_create(ctx, template) except Exception as e: LOG.warning("Creation of cluster template " "from {path} failed, {reason}".format( path=cl['path'], reason=e)) raise Handled() created.append(template) LOG.info("Created cluster template {info} " "from {path}".format(info=u.name_and_id(template), path=cl['path'])) except Handled: error = do_reversals(created, updated) except Exception as e: LOG.warning("Unhandled exception while processing " "cluster templates, {reason}".format(reason=e)) error = do_reversals(created, updated) return error def do_update(): '''Create or update default templates for the specified tenant. Looks for '.json' files beginning at the specified starting directory (--directory CLI option) and descending through subdirectories by default. The .json files represent cluster templates or node group templates. All '.json' files at the same directory level are treated as a set. Cluster templates may reference node group templates in the same set. If an error occurs in processing a set, skip it and continue. If creation of cluster templates fails, any node group templates in the set that were already created will be deleted. ''' ctx = Context(tenant_id=CONF.command.tenant_id) start_dir = os.path.abspath(CONF.command.directory) for root, dirs, files in os.walk(start_dir): # Find all the template files and identify them as node_group # or cluster templates. If there is an exception in # processing the set, return empty lists. node_groups, clusters = process_files(root, files) # Now that we know what the valid node group templates are, # we can validate the cluster templates as well. if check_cluster_templates_valid(node_groups, clusters): log_skipping_dir(root, "error processing cluster templates") # If there are existing default templates that match the names # in the template files, do usage checks here to detect update # failures early (we can't update a template in use) elif check_usage_of_existing(ctx, node_groups, clusters): log_skipping_dir(root, "templates in use") else: ng_info, error = add_node_group_templates(ctx, node_groups) if error: log_skipping_dir(root, "error processing node group templates") elif add_cluster_templates(ctx, clusters, ng_info["ids"]): log_skipping_dir(root, "error processing cluster templates") # Cluster templates failed so remove the node group templates reverse_node_group_template_updates(ctx, ng_info["updated"]) reverse_node_group_template_creates(ctx, ng_info["created"]) if CONF.command.norecurse: break def do_delete(): '''Delete default templates in the specified tenant Deletion uses the --plugin-name and --plugin-version options as filters. Only templates with 'is_default=True' will be deleted. 
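For example, the documented command "sahara-templates --config-file myconfig
delete -t $TENANT_ID -p vanilla -pv 2.7.1" dispatches here and removes only
the default templates for version 2.7.1 of the vanilla plugin.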
''' ctx = Context(tenant_id=CONF.command.tenant_id) for plugin in get_plugin_name(): kwargs = {'is_default': True} kwargs['plugin_name'] = plugin # Delete cluster templates first for the sake of usage checks lst = conductor.API.cluster_template_get_all(ctx, **kwargs) for l in lst: if not u.check_plugin_version(l, CONF.command.plugin_version): continue delete_cluster_template(ctx, l) lst = conductor.API.node_group_template_get_all(ctx, **kwargs) for l in lst: if not u.check_plugin_version(l, CONF.command.plugin_version): continue delete_node_group_template(ctx, l) def do_node_group_template_delete(): ctx = Context(tenant_id=CONF.command.tenant_id) template_name = CONF.command.template_name t = u.find_node_group_template_by_name(ctx, template_name) if t: delete_node_group_template(ctx, t) else: LOG.warning("Deletion of node group template {name} failed, " "no such template".format(name=template_name)) def do_node_group_template_delete_by_id(): ctx = Context(is_admin=True) # Make sure it's a default t = conductor.API.node_group_template_get(ctx, CONF.command.id) if t: if t["is_default"]: delete_node_group_template(ctx, t) else: LOG.warning("Deletion of node group template {info} skipped, " "not a default template".format( info=u.name_and_id(t))) else: LOG.warning("Deletion of node group template {id} failed, " "no such template".format(id=CONF.command.id)) def do_cluster_template_delete(): ctx = Context(tenant_id=CONF.command.tenant_id) template_name = CONF.command.template_name t = u.find_cluster_template_by_name(ctx, template_name) if t: delete_cluster_template(ctx, t) else: LOG.warning("Deletion of cluster template {name} failed, " "no such template".format(name=template_name)) def do_cluster_template_delete_by_id(): ctx = Context(is_admin=True) # Make sure it's a default t = conductor.API.cluster_template_get(ctx, CONF.command.id) if t: if t["is_default"]: delete_cluster_template(ctx, t) else: LOG.warning("Deletion of cluster template {info} skipped, " "not a default template".format( info=u.name_and_id(t))) else: LOG.warning("Deletion of cluster template {id} failed, " "no such template".format(id=CONF.command.id)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/templates/cli.py0000664000175000017500000002013500000000000020024 0ustar00zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
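# Overview: this module provides the sahara-templates console entry point.
# It registers the 'update', 'delete', 'node-group-template-delete',
# 'cluster-template-delete', 'node-group-template-delete-id' and
# 'cluster-template-delete-id' subcommands, enforces the extra option checks
# (a database connection string is required, and --plugin-version is only
# valid together with --plugin-name), and dispatches to the corresponding
# functions in api.py.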
import sys from oslo_config import cfg from oslo_log import log import pkg_resources as pkg from sahara.db.templates import api from sahara import version LOG = log.getLogger(__name__) CONF = cfg.CONF def extra_option_checks(): if not CONF.database.connection: print("No database connection string was specified in configuration", file=sys.stderr) sys.exit(1) if CONF.command.name in ['update', 'delete']: if CONF.command.plugin_version and not CONF.command.plugin_name: print("The --plugin-version option is not valid " "without --plugin-name", file=sys.stderr) sys.exit(-1) if CONF.command.name == "update": # Options handling probably needs some refactoring in the future. # For now, though, we touch the conductor which ultimately touches # the plugins.base. Use the "plugins" option there as a default # list of plugins to process, since those are the plugins that # will be loaded by Sahara if not CONF.command.plugin_name: if "plugins" in CONF and CONF.plugins: LOG.info("Using plugin list {plugins} from " "config".format(plugins=CONF.plugins)) else: print("No plugins specified with --plugin-name " "or config", file=sys.stderr) sys.exit(-1) def add_command_parsers(subparsers): # Note, there is no 'list' command here because the client # or REST can be used for list operations. Default templates # will display, and templates will show the 'is_default' field. def add_id(parser): parser.add_argument('--id', required=True, help='The id of the default ' 'template to delete') def add_tenant_id(parser): parser.add_argument('-t', '--tenant_id', required=True, help='Tenant ID for database operations.') def add_name_and_tenant_id(parser): parser.add_argument('--name', dest='template_name', required=True, help='Name of the default template') add_tenant_id(parser) def add_plugin_name_and_version(parser, require_plugin_name=False): plugin_name_help = ('Only process templates containing ' 'a "plugin_name" field matching ' 'one of these values.') if not require_plugin_name: extra = (' The default list of plugin names ' 'is taken from the "plugins" parameter in ' 'the [DEFAULT] config section.') plugin_name_help += extra parser.add_argument('-p', '--plugin-name', nargs="*", required=require_plugin_name, help=plugin_name_help) parser.add_argument('-pv', '--plugin-version', nargs="*", help='Only process templates containing a ' '"hadoop_version" field matching one of ' 'these values. This option is ' 'only valid if --plugin-name is specified ' 'as well. A version specified ' 'here may optionally be prefixed with a ' 'plugin name and a dot, for example ' '"vanilla.1.2.1". Dotted versions only ' 'apply to the plugin named in the ' 'prefix. Versions without a prefix apply to ' 'all plugins.') fname = pkg.resource_filename(version.version_info.package, "plugins/default_templates") # update command parser = subparsers.add_parser('update', help='Update the default template set') parser.add_argument('-d', '--directory', default=fname, help='Template directory. 
Default is %s' % fname) parser.add_argument('-n', '--norecurse', action='store_true', help='Do not descend into subdirectories') add_plugin_name_and_version(parser) add_tenant_id(parser) parser.set_defaults(func=api.do_update) # delete command parser = subparsers.add_parser('delete', help='Delete default templates ' 'by plugin and version') add_plugin_name_and_version(parser, require_plugin_name=True) add_tenant_id(parser) parser.set_defaults(func=api.do_delete) # node-group-template-delete command parser = subparsers.add_parser('node-group-template-delete', help='Delete a default ' 'node group template by name') add_name_and_tenant_id(parser) parser.set_defaults(func=api.do_node_group_template_delete) # cluster-template-delete command parser = subparsers.add_parser('cluster-template-delete', help='Delete a default ' 'cluster template by name') add_name_and_tenant_id(parser) parser.set_defaults(func=api.do_cluster_template_delete) # node-group-template-delete-id command parser = subparsers.add_parser('node-group-template-delete-id', help='Delete a default ' 'node group template by id') add_id(parser) parser.set_defaults(func=api.do_node_group_template_delete_by_id) # cluster-template-delete-id command parser = subparsers.add_parser('cluster-template-delete-id', help='Delete a default ' 'cluster template by id') add_id(parser) parser.set_defaults(func=api.do_cluster_template_delete_by_id) command_opt = cfg.SubCommandOpt('command', title='Command', help='Available commands', handler=add_command_parsers) CONF.register_cli_opt(command_opt) def unregister_extra_cli_opt(name): try: for cli in CONF._cli_opts: if cli['opt'].name == name: CONF.unregister_opt(cli['opt']) except Exception: pass # Remove a few extra CLI opts that we picked up via imports # Do this early so that they do not appear in the help for extra_opt in ["log-exchange", "host", "port"]: unregister_extra_cli_opt(extra_opt) def main(): # TODO(tmckay): Work on restricting the options # pulled in by imports which show up in the help. # If we find a nice way to do this the calls to # unregister_extra_cli_opt() can be removed CONF(project='sahara') # For some reason, this is necessary to clear cached values # and re-read configs. For instance, if this is not done # here the 'plugins' value will not reflect the value from # the config file on the command line CONF.reload_config_files() log.setup(CONF, "sahara") # If we have to enforce extra option checks, like one option # requires another, do it here extra_option_checks() # Since this may be scripted, record the command in the log # so a user can know exactly what was done LOG.info("Command: {command}".format(command=' '.join(sys.argv))) api.set_logger(LOG) api.set_conf(CONF) CONF.command.func() LOG.info("Finished {command}".format(command=CONF.command.name)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/db/templates/utils.py0000664000175000017500000001631000000000000020415 0ustar00zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. import copy import six from sahara import conductor def name_and_id(template): return "{name} ({id})".format(name=template["name"], id=template["id"]) def is_node_group(template): # Node group templates and cluster templates have # different required fields in validation and neither # allows additional fields. So, the presence of # node_processes or flavor_id should suffice to # identify a node group template. Check for both # to be nice, in case someone made a typo. return 'node_processes' in template or 'flavor_id' in template def substitute_ng_ids(cl, ng_dict): '''Substitute node group template ids for node group template names If the cluster template contains node group elements with node_group_template_id fields that reference node group templates by name, substitute the node group template id for the name. The name reference is expected to be a string containing a format specifier of the form "{name}", for example "{master}" :param cl: a cluster template :param ng_dict: a dictionary of node group template ids keyed by node group template names ''' for ng in cl["node_groups"]: if "node_group_template_id" in ng: val = ng["node_group_template_id"].format(**ng_dict) ng["node_group_template_id"] = val def check_basic_fields(template): return "plugin_name" in template and ( "hadoop_version" in template and ( "name" in template)) def check_plugin_version(template, plugin_versions): '''Check that the template matches the plugin versions list Tests whether or not the plugin version indicated by the template matches one of the versions specified in plugin_versions :param template: A node group or cluster template :param plugin_versions: A list of plugin version strings. These values may be regular version strings or may be the name of the plugin followed by a "." followed by a version string. :returns: True if the plugin version specified in the template matches a version in plugin_versions or plugin_versions is an empty list. Otherwise False ''' def dotted_name(template): return template['plugin_name'] + "." + template['hadoop_version'] version_matches = plugin_versions is None or ( template['hadoop_version'] in plugin_versions) or ( dotted_name(template) in plugin_versions) return version_matches def check_plugin_name_and_version(template, plugin_names, plugin_versions): '''Check that the template is for one of the specified plugins Tests whether or not the plugin name and version indicated by the template matches one of the names and one of the versions specified in plugin_names and plugin_versions :param template: A node group or cluster template :param plugin_names: A list of plugin names :param plugin_versions: A list of plugin version strings. These values may be regular version strings or may be the name of the plugin followed by a "." followed by a version string. :returns: True if the plugin name specified in the template matches a name in plugin_names or plugin_names is an empty list, and if the plugin version specified in the template matches a version in plugin_versions or plugin_versions is an empty list. 
Otherwise False ''' name_and_version_matches = (plugin_names is None or ( template['plugin_name'] in plugin_names)) and ( check_plugin_version(template, plugin_versions)) return name_and_version_matches # TODO(tmckay): refactor the service validation code so # that the node group template usage checks there can be reused # without incurring unnecessary dependencies def check_node_group_template_usage(node_group_template_id, cluster_list, cluster_template_list=None): cluster_template_list = cluster_template_list or [] cluster_users = [] template_users = [] for cluster in cluster_list: if (node_group_template_id in [node_group.node_group_template_id for node_group in cluster.node_groups]): cluster_users += [cluster.name] for cluster_template in cluster_template_list: if (node_group_template_id in [node_group.node_group_template_id for node_group in cluster_template.node_groups]): template_users += [cluster_template.name] return cluster_users, template_users # TODO(tmckay): refactor the service validation code so # that the cluster template usage checks there can be reused # without incurring unnecessary dependencies def check_cluster_template_usage(cluster_template_id, cluster_list): cluster_users = [] for cluster in cluster_list: if cluster_template_id == cluster.cluster_template_id: cluster_users.append(cluster.name) return cluster_users def find_node_group_template_by_name(ctx, name): t = conductor.API.node_group_template_get_all(ctx, name=name, is_default=True) if t: return t[0] return None def find_cluster_template_by_name(ctx, name): t = conductor.API.cluster_template_get_all(ctx, name=name, is_default=True) if t: return t[0] return None def value_diff(current, new_values): '''Return the entries in current that would be overwritten by new_values Returns the set of entries in current that would be overwritten if current.update(new_values) was called. :param current: A dictionary whose key values are a superset of the key values in new_values :param new_values: A dictionary ''' # Current is an existing template from the db and # template is a set of values that has been validated # against the JSON schema for the template. # Copy items from current if they are present in template. # In the case of "node_groups" the conductor does magic # to set up template relations and insures that appropriate # fields are cleaned (like "updated_at" and "id") so we # trust the conductor in that case. diff_values = {} for k, v in six.iteritems(new_values): if k in current and current[k] != v: diff_values[k] = copy.deepcopy(current[k]) return diff_values ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/exceptions.py0000664000175000017500000003064600000000000017063 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
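# The subclasses below all follow the same pattern: define 'code' and either
# a fixed 'message' or a 'message_template' that is formatted in __init__.
# A minimal sketch of a new subclass (illustrative only, not an existing
# Sahara exception):
#
#     class SomethingFailed(SaharaException):
#         code = "SOMETHING_FAILED"
#         message = _("Something has failed")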
import string from oslo_utils import uuidutils import six from sahara.i18n import _ class SaharaException(Exception): """Base Exception for the project To correctly use this class, inherit from it and define a 'message' and 'code' properties. """ code = "UNKNOWN_EXCEPTION" message = _("An unknown exception occurred") def __str__(self): return self.message def __init__(self, message=None, code=None, inject_error_id=True): self.uuid = uuidutils.generate_uuid() if code: self.code = code if message: self.message = message if inject_error_id: # Add Error UUID to the message if required self.message = (_('%(message)s\nError ID: %(id)s') % {'message': self.message, 'id': self.uuid}) super(SaharaException, self).__init__( '%s: %s' % (self.code, self.message)) class NotFoundException(SaharaException): code = "NOT_FOUND" message_template = _("Object '%s' is not found") # It could be a various property of object which was not found def __init__(self, value, message_template=None): self.value = value if message_template: formatted_message = message_template % value else: formatted_message = self.message_template % value super(NotFoundException, self).__init__(formatted_message) class NoUniqueMatchException(SaharaException): code = "NO_UNIQUE_MATCH" message_template = _( "Response {response} is not unique for this query {query}.") def __init__(self, response, query, message_template=None): template = message_template or self.message_template formatted_message = template.format(response=response, query=query) super(NoUniqueMatchException, self).__init__(formatted_message) class NameAlreadyExistsException(SaharaException): code = "NAME_ALREADY_EXISTS" message = _("Name already exists") class InvalidCredentials(SaharaException): message = _("Invalid credentials") code = "INVALID_CREDENTIALS" class InvalidReferenceException(SaharaException): code = "INVALID_REFERENCE" message = _("Invalid object reference") class RemoteCommandException(SaharaException): code = "REMOTE_COMMAND_FAILED" message_template = _("Error during command execution: \"%s\"") def __init__(self, cmd, ret_code=None, stdout=None, stderr=None): self.cmd = cmd self.ret_code = ret_code self.stdout = stdout self.stderr = stderr formatted_message = self.message_template % cmd def to_printable(s): return "".join(filter(lambda x: x in string.printable, s)) if ret_code: formatted_message = '%s\nReturn code: %s' % ( formatted_message, six.text_type(ret_code)) if stderr: formatted_message = '%s\nSTDERR:\n%s' % ( formatted_message, to_printable(stderr)) if stdout: formatted_message = '%s\nSTDOUT:\n%s' % ( formatted_message, to_printable(stdout)) super(RemoteCommandException, self).__init__(formatted_message) class InvalidDataException(SaharaException): """General exception to use for invalid data A more useful message should be passed to __init__ which tells the user more about why the data is invalid. 
""" code = "INVALID_DATA" message = _("Data is invalid") class BadJobBinaryInternalException(SaharaException): code = "BAD_JOB_BINARY" message = _("Job binary internal data must be a string of length " "greater than zero") class BadJobBinaryException(SaharaException): code = "BAD_JOB_BINARY" message = _("To work with JobBinary located in internal swift add 'user'" " and 'password' to extra") class DBDuplicateEntry(SaharaException): code = "DB_DUPLICATE_ENTRY" message = _("Database object already exists") class CreationFailed(SaharaException): message = _("Object was not created") code = "CREATION_FAILED" class CancelingFailed(SaharaException): message = _("Operation was not canceled") code = "CANCELING_FAILED" class SuspendingFailed(SaharaException): message = _("Operation was not suspended") code = "SUSPENDING_FAILED" class InvalidJobStatus(SaharaException): message = _("Invalid Job Status") code = "INVALID_JOB_STATUS" class DeletionFailed(SaharaException): code = "DELETION_FAILED" message = _("Object was not deleted") class MissingFloatingNetworkException(SaharaException): code = "MISSING_FLOATING_NETWORK" message_template = _("Node Group %s is missing 'floating_ip_pool' " "field") def __init__(self, ng_name): formatted_message = self.message_template % ng_name super(MissingFloatingNetworkException, self).__init__( formatted_message) class SwiftClientException(SaharaException): '''General wrapper object for swift client exceptions This exception is intended for wrapping the message from a swiftclient.ClientException in a SaharaException. The ClientException should be caught and an instance of SwiftClientException raised instead. ''' code = "SWIFT_CLIENT_EXCEPTION" message = _("An error has occurred while performing a request to Swift") class S3ClientException(SaharaException): '''General wrapper object for boto exceptions Intended to replace any errors raised by the botocore client. 
''' code = "S3_CLIENT_EXCEPTION" message = _("An error has occurred while performing a request to S3") class DataTooBigException(SaharaException): code = "DATA_TOO_BIG" message_template = _("Size of data (%(size)s) is greater than maximum " "(%(maximum)s)") def __init__(self, size, maximum, message_template=None): if message_template: self.message_template = message_template formatted_message = self.message_template % ( {'size': size, 'maximum': maximum}) super(DataTooBigException, self).__init__(formatted_message) class ThreadException(SaharaException): code = "THREAD_EXCEPTION" message_template = _("An error occurred in thread '%(thread)s': %(e)s" "\n%(stacktrace)s") def __init__(self, thread_description, e, stacktrace): formatted_message = self.message_template % { 'thread': thread_description, 'e': six.text_type(e), 'stacktrace': stacktrace} super(ThreadException, self).__init__(formatted_message) class SubprocessException(SaharaException): code = "SUBPROCESS_EXCEPTION" message = _("Subprocess execution has failed") class NotImplementedException(SaharaException): code = "NOT_IMPLEMENTED" message_template = _("Feature '%s' is not implemented") def __init__(self, feature, message_template=None): if message_template: self.message_template = message_template formatted_message = self.message_template % feature super(NotImplementedException, self).__init__(formatted_message) class HeatStackException(SaharaException): code = "HEAT_STACK_EXCEPTION" message_template = _("Heat stack failed with status %s") def __init__(self, heat_stack_status=None, message=None): if message: formatted_message = message elif heat_stack_status: formatted_message = self.message_template % heat_stack_status else: formatted_message = _("Heat stack failed") super(HeatStackException, self).__init__(formatted_message) class ConfigurationError(SaharaException): code = "CONFIGURATION_ERROR" message = _("The configuration has failed") class IncorrectStateError(SaharaException): message = _("The object is in an incorrect state") code = "INCORRECT_STATE_ERROR" class FrozenClassError(SaharaException): code = "FROZEN_CLASS_ERROR" message_template = _("Class %s is immutable!") def __init__(self, instance): formatted_message = self.message_template % type(instance).__name__ super(FrozenClassError, self).__init__(formatted_message) class SystemError(SaharaException): code = "SYSTEM_ERROR" message = _("System error has occurred") class EDPError(SaharaException): code = "EDP_ERROR" message = _("Failed to complete EDP operation") class OozieException(SaharaException): code = "OOZIE_EXCEPTION" message = _("Failed to perform Oozie request") class TimeoutException(SaharaException): code = "TIMEOUT" message_template = _("'%(operation)s' timed out after %(timeout)i " "second(s)") def __init__(self, timeout, op_name=None, timeout_name=None): if op_name: op_name = _("Operation with name '%s'") % op_name else: op_name = _("Operation") formatted_message = self.message_template % { 'operation': op_name, 'timeout': timeout} if timeout_name: desc = _("%(message)s and following timeout was violated: " "%(timeout_name)s") formatted_message = desc % { 'message': formatted_message, 'timeout_name': timeout_name} super(TimeoutException, self).__init__(formatted_message) class DeprecatedException(SaharaException): code = "DEPRECATED" message = _("The version you are trying to use is deprecated") class Forbidden(SaharaException): code = "FORBIDDEN" message = _("You are not authorized to complete this action") class ImageNotRegistered(SaharaException): 
code = "IMAGE_NOT_REGISTERED" message_template = _("Image %s is not registered in Sahara") def __init__(self, image): formatted_message = self.message_template % image super(ImageNotRegistered, self).__init__(formatted_message) class MalformedRequestBody(SaharaException): code = "MALFORMED_REQUEST_BODY" message_template = _("Malformed message body: %(reason)s") def __init__(self, reason): formatted_message = self.message_template % {"reason": reason} super(MalformedRequestBody, self).__init__(formatted_message) class QuotaException(SaharaException): code = "QUOTA_ERROR" message_template = _("Quota exceeded for %(resource)s: " "Requested %(requested)s, " "but available %(available)s") def __init__(self, resource, requested, available): formatted_message = self.message_template % { 'resource': resource, 'requested': requested, 'available': available} super(QuotaException, self).__init__(formatted_message) class UpdateFailedException(SaharaException): code = "UPDATE_FAILED" message_template = _("Object '%s' could not be updated") # Object was unable to be updated def __init__(self, value, message_template=None): if message_template: self.message_template = message_template formatted_message = self.message_template % value super(UpdateFailedException, self).__init__(formatted_message) class MaxRetriesExceeded(SaharaException): code = "MAX_RETRIES_EXCEEDED" message_template = _("Operation %(operation)s wasn't executed correctly " "after %(attempts)d attempts") def __init__(self, attempts, operation): formatted_message = self.message_template % {'operation': operation, 'attempts': attempts} super(MaxRetriesExceeded, self).__init__(formatted_message) class InvalidJobExecutionInfoException(SaharaException): message = _("Job execution information is invalid") def __init__(self, message=None): if message: self.message = message self.code = "INVALID_JOB_EXECUTION_INFO" super(InvalidJobExecutionInfoException, self).__init__() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/i18n.py0000664000175000017500000000161200000000000015450 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# It's based on oslo.i18n usage in OpenStack Keystone project and # recommendations from https://docs.openstack.org/oslo.i18n/latest/ # user/usage.html import oslo_i18n _translators = oslo_i18n.TranslatorFactory(domain='sahara') # The primary translation function using the well-known name "_" _ = _translators.primary ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.597891 sahara-16.0.0/sahara/locale/0000775000175000017500000000000000000000000015556 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.597891 sahara-16.0.0/sahara/locale/de/0000775000175000017500000000000000000000000016146 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.709891 sahara-16.0.0/sahara/locale/de/LC_MESSAGES/0000775000175000017500000000000000000000000017733 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/locale/de/LC_MESSAGES/sahara.po0000664000175000017500000013347400000000000021546 0ustar00zuulzuul00000000000000# Translations template for sahara. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the sahara project. # # Translators: # Carsten Duch , 2014 # Ettore Atalan , 2014-2015 # Robert Simai, 2014 # Andreas Jaeger , 2016. #zanata # Frank Kloeker , 2018. #zanata # Andreas Jaeger , 2020. #zanata msgid "" msgstr "" "Project-Id-Version: sahara VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2020-04-23 21:26+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2020-04-25 10:36+0000\n" "Last-Translator: Andreas Jaeger \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: German\n" msgid " on foreign key constraint" msgstr "auf Fremdschlüssel-Constraint" #, python-format msgid "\"%s\" child cannot be added to prepare element" msgstr "'%s' Kind kann nicht zum Vorbereiten des Elements hinzugefügt werden" #, python-format msgid "\"%s\" child cannot be added to streaming element" msgstr "" "'%s' untergeordnetes Element kann dem Streaming-Element nicht hinzugefügt " "werden" #, python-format msgid "%(fmt)s is not implemented for OS %(distrib)s" msgstr "%(fmt)s ist nicht für OS %(distrib)s implementiert" #, python-format msgid "" "%(message)s\n" "Error ID: %(id)s" msgstr "%(message)s Fehler-ID: %(id)s" #, python-format msgid "%(message)s and following timeout was violated: %(timeout_name)s" msgstr "%(message)s und folgendes Timeout wurde verletzt: %(timeout_name)s" #, python-format msgid "%(message)s, required by service: %(required_by)s" msgstr "%(message)s, wird vom Dienst benötigt: %(required_by)s" #, python-format msgid "%s flow does not use mains" msgstr "%s flow verwendet kein Netz" #, python-format msgid "%s flow requires libs" msgstr "%s flow benötigt libs" #, python-format msgid "%s flow requires main script" msgstr "%s flow benötigt Hauptscript" #, python-format msgid "%s is not a valid name" msgstr "%s ist kein gültiger Name" #, python-format msgid "%s job must specify edp.java.main_class" msgstr "%s-Job muss edp.java.main_class angeben" #, python-format msgid "%s job must specify streaming mapper and reducer" msgstr "%s-Job muss 
Streaming-Mapper und Reducer angeben" #, python-format msgid "%s job must specify topology_name" msgstr "%s-Job muss topology_name angeben" #, python-format msgid "%s job requires 'input_id' and 'output_id'" msgstr "%s job benötigt 'input_id' und 'output_id'" #, python-format msgid "%s job requires main application jar" msgstr "%s Job benötigt Hauptanwendung jar" #, python-format msgid "'%(operation)s' timed out after %(timeout)i second(s)" msgstr "" "'%(operation)s' hat eine Zeitüberschreitung nach %(timeout)i Sekunde(n)" #, python-format msgid "'%s' field is not found" msgstr "'%s' Feld wurde nicht gefunden" msgid "'limit' must be positive integer" msgstr "'limit' muss eine positive Ganzzahl sein" msgid "'mains' and 'libs' overlap" msgstr "'mains' und 'libs' überschneiden sich" msgid "0 or 1" msgstr "0 oder 1" msgid "Admin principal for existing KDC server" msgstr "Admin-Prinzipal für den vorhandenen KDC-Server" msgid "All instances are available" msgstr "Alle Instanzen sind verfügbar" msgid "All instances have correct '/etc/resolv.conf' file" msgstr "Alle Instanzen haben die richtige Datei '/etc/resolv.conf'" #, python-format msgid "All validations have failed: %s" msgstr "Alle Validierungen sind fehlgeschlagen:%s" msgid "All verifications are disabled" msgstr "Alle Überprüfungen sind deaktiviert" msgid "An error has occurred while performing a request to S3" msgstr "Beim Ausführen einer Anfrage an S3 ist ein Fehler aufgetreten" msgid "An error has occurred while performing a request to Swift" msgstr "Beim Ausführen einer Anforderung an Swift ist ein Fehler aufgetreten" #, python-format msgid "" "An error occurred in thread '%(thread)s': %(e)s\n" "%(stacktrace)s" msgstr "" "Im Thread '%(thread)s' ist ein Fehler aufgetreten: %(e)s\n" "%(stacktrace)s" msgid "" "An interface was specified with the template for this job. Please pass an " "interface map with this job (even if empty)." msgstr "" "Eine Schnittstelle wurde mit der Vorlage für diesen Job angegeben. Bitte " "übergeben Sie eine Schnittstellenkarte mit diesem Job (auch wenn sie leer " "ist)." msgid "An unknown exception occurred" msgstr "Eine unbekannte Ausnahme ist aufgetreten" #, python-format msgid "Appending to file \"%s\"" msgstr "Anhängen an Datei '%s'" #, python-format msgid "Appending to files \"%s\"" msgstr "Anhängen an Dateien '%s'" #, python-format msgid "" "Argument '%(name)s' was passed both through the interface and in location " "'%(mapping_type)s'.'%(location)s'. Please pass this through either the " "interface or the configuration maps, not both." msgstr "" "Das Argument '%(name)s' wurde sowohl über die Schnittstelle als auch an den " "Speicherort '%(mapping_type)s'. '%(location)s' übergeben. Bitte geben Sie " "dies entweder über die Schnittstelle oder die Konfigurationskarten ab, nicht " "beide." #, python-format msgid "Argument names: %s are required for this job." msgstr "Argumentnamen: %s sind für diesen Job erforderlich." #, python-format msgid "Argument names: %s were not found in the interface for this job." msgstr "" "Argumentnamen: %s wurde in der Schnittstelle für diesen Job nicht gefunden." msgid "Argument {name} is not required and must specify a default value." msgstr "" "Argument {name} ist nicht erforderlich und muss einen Standardwert angeben." msgid "Argument {name} is required for image processing." msgstr "Argument {name} wird für die Abbildverarbeitung benötigt." msgid "Argument {name} not found." msgstr "Argument {name} wurde nicht gefunden." 
msgid "Argument {name} specifies a default which is not one of its choices." msgstr "" "Argument {name} gibt einen Standardwert an, der nicht zu seinen " "Auswahlmöglichkeiten gehört." msgid "Assign IPs" msgstr "Weisen Sie IPs zu" msgid "Available plugins" msgstr "Verfügbare Plugins" msgid "Available versions" msgstr "Verfügbare Versionen" msgid "Await for volume become detached" msgstr "Warten das Datenträger gelöst werden" #, python-format msgid "Can't add new nodegroup. Cluster already has nodegroup with name '%s'" msgstr "" "Kann keine neue Knotengruppe hinzufügen. Der Cluster hat bereits eine " "Knotengruppe mit dem Namen '%s'" #, python-format msgid "" "Can't find applicable target '%(applicable_target)s' for '%(config_name)s'" msgstr "" "Das anwendbare Ziel '%(applicable_target)s' für '%(config_name)s konnte " "nicht gefunden werden" #, python-format msgid "Can't find config '%(config_name)s' in '%(applicable_target)s'" msgstr "Konnte '%(config_name)s' in '%(applicable_target)s' nicht finden" msgid "Can't update verification with other updates" msgstr "Die Überprüfung mit anderen Updates kann nicht aktualisiert werden" msgid "" "Cannot delete heat stack {name}, reason: stack status: {status}, status " "reason: {reason}" msgstr "" "Heat Stack {name} kann nicht gelöscht werden, Grund: Stack Status: {status}, " "Status Grund: {reason}" #, python-format msgid "Cannot verify cluster. Reason: %s" msgstr "Cluster kann nicht überprüft werden. Grund: %s" #, python-format msgid "Chosen node group %(ng_name)s cannot be scaled : %(reason)s" msgstr "" "Die ausgewählte Knotengruppe %(ng_name)s kann nicht skaliert werden: " "%(reason)s" #, python-format msgid "Cinder availability zone '%s' not found" msgstr "Die Cinder-Verfügbarkeitszone '%s' wurde nicht gefunden" msgid "Cinder is not supported" msgstr "Cinder wird nicht unterstützt" #, python-format msgid "Class %s is immutable!" msgstr "Klasse %s ist unveränderlich!" #, python-format msgid "Closing HTTP session for %(host)s:%(port)s" msgstr "Schließen der HTTP-Sitzung für %(host)s:%(port)s" #, python-format msgid "Cluster %(cluster_name)s cannot be scaled : %(reason)s" msgstr "Cluster %(cluster_name)s kann nicht skaliert werden: %(reason)s" #, python-format msgid "Cluster Provision Step id '%s' not found!" msgstr "Cluster Provision Schritt-ID '%s' nicht gefunden!" #, python-format msgid "" "Cluster Template id '%s' can not be updated. It is referenced by at least " "one cluster." msgstr "" "Die Clustervorlagen-ID '%s' kann nicht aktualisiert werden. Es wird von " "mindestens einem Cluster referenziert." #, python-format msgid "Cluster Template id '%s' not found!" msgstr "Cluster Template-ID '%s' wurde nicht gefunden!" #, python-format msgid "Cluster cannot be scaled not in 'Active' status. Cluster status: %s" msgstr "" "Cluster kann nicht skaliert werden, nicht im Status 'Active'. 
Clusterstatus: " "%s" #, python-format msgid "" "Cluster created before Juno release can't be scaled with %(engine)s engine" msgstr "" "Cluster, die vor der Veröffentlichung von Juno erstellt wurden, können nicht " "mit %(engine)s engine skaliert werden" #, python-format msgid "" "Cluster created with %(old_engine)s infrastructure engine can't be scaled " "with %(new_engine)s engine" msgstr "" "Cluster, der mit der Infrastruktur-Engine %(old_engine)s erstellt wurde, " "kann nicht mit der Engine %(new_engine)s skaliert werden" #, python-format msgid "Cluster does not support job type %s" msgstr "Cluster unterstützt den Jobtyp %s nicht" #, python-format msgid "Cluster doesn't contain node group with name '%s'" msgstr "Cluster enthält keine Knotengruppe mit dem Namen '%s'" msgid "Cluster has invalid topology: {description}" msgstr "Der Cluster hat eine ungültige Topologie: {description}" #, python-format msgid "Cluster health is %(status)s. Reason: %(reason)s" msgstr "Clustergesundheit ist %(status)s. Grund: %(reason)s" #, python-format msgid "Cluster id '%s' not found!" msgstr "Cluster-ID '%s' wurde nicht gefunden!" #, python-format msgid "Cluster is missing a service: %s" msgstr "Cluster fehlt ein Dienst: %s" msgid "Cluster is not active or doesn't exists" msgstr "Der Cluster ist nicht aktiv oder existiert nicht" #, python-format msgid "Cluster template %(id)s in use by %(clusters)s" msgstr "Cluster-Vorlage %(id)s wird von %(clusters)s verwendet" #, python-format msgid "Cluster template with id '%s' not found" msgstr "Clustervorlage mit der ID '%s' wurde nicht gefunden" #, python-format msgid "Cluster template with name '%s' already exists" msgstr "Cluster-Vorlage mit dem Namen '%s' existiert bereits" #, python-format msgid "Cluster verification in state %s" msgstr "Clusterverifizierung im Status %s" #, python-format msgid "" "Cluster with id '%(cluster_id)s' doesn't support job type '%(job_type)s'" msgstr "" "Cluster mit der ID '%(cluster_id)s' unterstützt den Jobtyp '%(job_type)s' " "nicht" #, python-format msgid "Cluster with id '%s' doesn't exist" msgstr "Cluster mit der ID '%s' existiert nicht" #, python-format msgid "Cluster with name '%s' already exists" msgstr "Cluster mit dem Namen '%s' existiert bereits" #, python-format msgid "" "Composite hostname %(host)s in provisioned cluster exceeds maximum limit " "%(limit)s characters" msgstr "" "Zusammengesetzter Hostname %(host)s im bereitgestellten Cluster " "überschreitet die maximale Anzahl der Zeichen %(limit)s" msgid "Configs 'accesskey', 'secretkey', and 'endpoint' must be provided." msgstr "" "Configs 'accesskey', 'secretkey' und 'endpoint' müssen zur Verfügung " "gestellt werden." msgid "Configure instances" msgstr "Konfigurieren Sie Instanzen" #, python-format msgid "Content type '%s' isn't supported" msgstr "Der Inhaltstyp '%s' wird nicht unterstützt" msgid "Context isn't available here" msgstr "Kontext ist hier nicht verfügbar" msgid "Couldn't read '/etc/resolv.conf' on instances: {}." msgstr "'/etc/resolv.conf' konnte nicht in Instanzen gelesen werden: '{}'." 
msgid "Create Heat stack" msgstr "Erstelle einen Heat-Stapel" msgid "Creating cluster failed for the following reason(s): {reason}" msgstr "" "Das Erstellen des Clusters ist aus folgenden Gründen fehlgeschlagen: {reason}" msgid "Currently Spark engine does not support scheduled EDP jobs" msgstr "Derzeit unterstützt Spark Engine keine geplanten EDP-Jobs" msgid "Currently Storm engine does not support scheduled EDP jobs" msgstr "Zurzeit unterstützt die Storm Engine keine geplanten EDP-Jobs" #, python-format msgid "Data Source deletion failed%s" msgstr "Löschen der Datenquelle fehlgeschlagen%s" #, python-format msgid "Data Source id '%s' not found!" msgstr "Datenquellen-ID '%s' wurde nicht gefunden!" msgid "Data is invalid" msgstr "Daten sind ungültig" msgid "Data source url must have a scheme" msgstr "Die Datenquellen-URL muss ein Schema haben" #, python-format msgid "" "Data source value '%s' is neither a valid data source ID nor a valid URL." msgstr "" "Der Datenquellenwert '%s' ist weder eine gültige Datenquellen-ID noch eine " "gültige URL." #, python-format msgid "Data source with name '%s' already exists" msgstr "Die Datenquelle mit dem Namen '%s' existiert bereits" #, python-format msgid "Data source with name '%s' already exists." msgstr "Die Datenquelle mit dem Namen '%s' existiert bereits." #, python-format msgid "Data sources couldn't be loaded: %s" msgstr "Datenquellen konnten nicht geladen werden: %s" #, python-format msgid "DataSource id '%s' not found" msgstr "Die DataSource-ID '%s' wurde nicht gefunden" msgid "DataSource is used in a PENDING Job and can not be updated." msgstr "" "DataSource wird in einem PENDING-Job verwendet und kann nicht aktualisiert " "werden." #, python-format msgid "DataSource with id '%s' doesn't exist" msgstr "DataSource mit der ID '%s' existiert nicht" msgid "Database object already exists" msgstr "Datenbankobjekt existiert bereits" msgid "Deploy KDC server" msgstr "Stellen Sie den KDC-Server bereit" #, python-format msgid "Duplicate entry for Cluster: %s" msgstr "Doppelter Eintrag für Cluster: %s" #, python-format msgid "Duplicate entry for ClusterTemplate: %s" msgstr "Doppelter Eintrag für ClusterTemplate: %s" #, python-format msgid "Duplicate entry for DataSource: %s" msgstr "Doppelter Eintrag für DataSource: %s" #, python-format msgid "Duplicate entry for Job: %s" msgstr "Doppelter Eintrag für Job: %s" #, python-format msgid "Duplicate entry for JobBinary: %s" msgstr "Doppelter Eintrag für JobBinary: %s" #, python-format msgid "Duplicate entry for JobBinaryInternal: %s" msgstr "Doppelter Eintrag für JobBinaryInternal: %s" #, python-format msgid "Duplicate entry for JobExecution: %s" msgstr "Doppelter Eintrag für JobExecution: %s" #, python-format msgid "Duplicate entry for NodeGroupTemplate: %s" msgstr "Doppelter Eintrag für NodeGroupTemplate: %s" msgid "Duplicate entry for instances to delete" msgstr "Doppelter Eintrag für zu löschende Instanzen" #, python-format msgid "Duplicate entry for object %(object)s. Failed on columns: %(columns)s" msgstr "" "Doppelter Eintrag für Objekt %(object)s. 
In Spalten fehlgeschlagen: " "%(columns)s" #, python-format msgid "Duplicates in node group names are detected: %s" msgstr "Duplikate in Knotengruppennamen werden erkannt: %s" #, python-format msgid "Duplicates in node processes have been detected: %s" msgstr "Duplikate in Knotenprozessen wurden erkannt: %s" msgid "Engine doesn't support suspending job feature" msgstr "Die Engine unterstützt das Aussetzen der Jobfunktion nicht" msgid "Engine: create cluster" msgstr "Engine: Cluster erstellen" msgid "Engine: rollback cluster" msgstr "Engine: Rollback-Cluster" msgid "Engine: scale cluster" msgstr "Motor: Cluster skalieren" msgid "Engine: shutdown cluster" msgstr "Engine: Herunterfahren des Clusters" #, python-format msgid "Error during command execution: \"%s\"" msgstr "Fehler bei der Ausführung des Befehls: '%s'" msgid "Error during suspending of job execution: {error}" msgstr "Fehler beim Aussetzen der Jobausführung: {error}" #, python-format msgid "Executing \"%s\"" msgstr "Ausführen von '%s'" #, python-format msgid "Executing interactively \"%s\"" msgstr "Interaktiv ausführen '%s'" #, python-format msgid "Failed to Provision Hadoop Cluster: %s" msgstr "Fehler beim Bereitstellen des Hadoop-Clusters: %s" msgid "Failed to complete EDP operation" msgstr "Der EDV-Vorgang konnte nicht abgeschlossen werden" msgid "Failed to create trust" msgstr "Fehler beim Erstellen von Vertrauen" msgid "Failed to decommission cluster" msgstr "Fehler beim Außerbetriebsetzen des Clusters" #, python-format msgid "Failed to delete temp dir %(dir)s (reason: %(reason)s)" msgstr "" "Löschen des temporären Verzeichnisses %(dir)s fehlgeschlagen (Grund: " "%(reason)s)" msgid "Failed to delete trust {0}" msgstr "Fehler beim Löschen der Vertrauensstellung {0}" #, python-format msgid "Failed to find domain %s" msgstr "Die Domäne %s konnte nicht gefunden werden" #, python-format msgid "Failed to find stack %(stack)s" msgstr "Der Stapel %(stack)s konnte nicht gefunden werden" #, python-format msgid "Failed to find user %s" msgstr "Der Benutzer%s konnte nicht gefunden werden" msgid "Failed to perform Oozie request" msgstr "Oozie-Anfrage konnte nicht ausgeführt werden" msgid "Failed to suspend job execution {jid}" msgstr "Die Ausführung des Jobs konnte nicht angehalten werden {jid}" #, python-format msgid "Feature '%s' is not implemented" msgstr "Feature '%s' ist nicht implementiert" #, python-format msgid "Floating IP pool %s not found" msgstr "Der Floating-IP-Pool %s wurde nicht gefunden" msgid "HDFS url is incorrect, cannot determine a hostname" msgstr "HDFS-URL ist falsch, kann keinen Hostnamen ermitteln" msgid "HDFS url must not be empty" msgstr "Die HDFS-URL darf nicht leer sein" #, python-format msgid "" "Hadoop cluster should contain %(expected_count)s %(component)s component(s). " "Actual %(component)s count is %(count)s" msgstr "" "Hadoop-Cluster sollte %(expected_count)s %(component)s-Komponente(n) " "enthalten. Die tatsächliche %(component)s Anzahl ist %(count)s" #, python-format msgid "Health check id '%s' not found!" msgstr "Gesundheitscheck-ID '%s' wurde nicht gefunden!" msgid "Health check timed out" msgstr "Zeitüberschreitung bei der Gesundheitsprüfung" msgid "Heat stack failed" msgstr "Heat Stack ist fehlgeschlagen" #, python-format msgid "Heat stack failed with status %s" msgstr "Heat Stack ist mit Status %s fehlgeschlagen" msgid "" "If this flag is set, no changes will be made to the image; instead, the " "script will fail if discrepancies are found between the image and the " "intended state." 
msgstr "" "Wenn dieses Flag gesetzt ist, werden keine Änderungen am Abbild vorgenommen. " "stattdessen wird das Skript fehlschlagen, wenn Diskrepanzen zwischen dem " "Abbild und dem beabsichtigten Zustand gefunden werden." #, python-format msgid "Image %s is not registered in Sahara" msgstr "Image %s ist in Sahara nicht registriert" msgid "Image generation for the {plugin} plugin" msgstr "Abbilderzeugung für das Plugin {plugin}" #, python-format msgid "Image has failed validation: %s" msgstr "Die Abildvalidierung ist fehlgeschlagen: %s" #, python-format msgid "Image validation spec is in error: %s" msgstr "Die Abbildvalidierungsspezifikation ist fehlerhaft: %s" #, python-format msgid "" "In file \"%(file)s\" replacing string \"%(old_string)s\" with " "\"%(new_string)s\"" msgstr "" "In der Datei '%(file)s' wird der String '%(old_string)s durch " "'%(new_string)s' ersetzt" msgid "Incorrect path" msgstr "Falscher Pfad" #, python-format msgid "Installing packages \"%s\"" msgstr "Pakete installieren '%s'" #, python-format msgid "Instance %s not found" msgstr "Instanz %s wurde nicht gefunden" #, python-format msgid "Instance id '%s' not found!" msgstr "Instanz-ID '%s' wurde nicht gefunden!" #, python-format msgid "Instances (%s) are not available in the cluster." msgstr "Instanzen (%s) sind im Cluster nicht verfügbar." msgid "" "Instances ({}) have incorrect '/etc/resolv.conf' file, expected nameservers: " "{}." msgstr "" "Instanzen ({}) haben eine falsche Datei '/etc/resolv.conf', erwartete " "Nameserver: {}." msgid "Internal data base url must not be empty" msgstr "Die interne Datenbank-URL darf nicht leer sein" msgid "Internal data base url netloc must be a uuid" msgstr "Die interne Datenbank-URL netloc muss eine UUID sein" msgid "Invalid Job Status" msgstr "Ungültiger Jobstatus" msgid "Invalid Time Format" msgstr "Ungültiges Zeitformat" msgid "Invalid credentials" msgstr "Ungültige Anmeldeinformationen" msgid "Invalid data source" msgstr "Ungültige Datenquelle" msgid "Invalid job binary" msgstr "Ungültige Job-Binärdatei" msgid "Invalid object reference" msgstr "Ungültige Objektreferenz" msgid "Invalid status parameter" msgstr "Ungültiger Statusparameter" msgid "" "It's a fake plugin that aimed to work on the CirrOS images. It doesn't " "install Hadoop. It's needed to be able to test provisioning part of Sahara " "codebase itself." msgstr "" "Es ist ein gefälschtes Plugin, das darauf abzielt, an den CirrOS-Abbildern " "zu arbeiten. Es installiert Hadoop nicht. Es wird benötigt, um den " "Provisioning-Teil der Sahara Codebase selbst testen zu können." 
msgid "" "Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy " "Files location" msgstr "" "Java Cryptography Extension (JCE) Unlimited Strength Jurisdiction Policy " "Files location" #, python-format msgid "" "Job Execution with id '%s' cannot be canceled because it's marked as " "protected" msgstr "" "Die Jobausführung mit der ID '%s' kann nicht abgebrochen werden, da sie als " "geschützt markiert ist" #, python-format msgid "Job binaries couldn't be loaded: %s" msgstr "Job-Binärdateien konnten nicht geladen werden: %s" #, python-format msgid "Job binary '%s' does not exist" msgstr "Die Job-Binärdatei '%s' existiert nicht" msgid "Job binary internal data must be a string of length greater than zero" msgstr "" "Interne Job-Binärdaten müssen eine Zeichenfolge mit einer Länge größer als " "Null sein" msgid "Job binary url must have a scheme" msgstr "Job-Binär-URL muss ein Schema haben" #, python-format msgid "Job binary with name '%s' already exists." msgstr "Die Job-Binärdatei mit dem Namen '%s' existiert bereits." #, python-format msgid "Job deletion failed%s" msgstr "Auftragslöschung fehlgeschlagen %s" #, python-format msgid "Job execution %s was not canceled" msgstr "Jobausführung %s wurde nicht abgebrochen" msgid "Job execution information is invalid" msgstr "Informationen zur Jobausführung sind ungültig" #, python-format msgid "" "Job execution with id '%s' cannot be canceled because it wasn't created in " "this tenant" msgstr "" "Die Jobausführung mit der ID '%s' kann nicht abgebrochen werden, weil sie in " "diesem Mandanten nicht erstellt wurde" #, python-format msgid "Job id '%s' not found!" msgstr "Job-ID '%s' wurde nicht gefunden!" msgid "Job start time should be later than now" msgstr "Job Startzeit sollte später als jetzt sein" #, python-format msgid "Job template with id '%s' doesn't exist" msgstr "Die Jobvorlage mit der ID '%s' existiert nicht" #, python-format msgid "Job with name '%s' already exists" msgstr "Der Job mit dem Namen '%s' existiert bereits" #, python-format msgid "JobBinary Id '%s' is used in a PENDING job and can not be updated." msgstr "" "Die JobBinary-ID '%s' wird in einem PENDING-Job verwendet und kann nicht " "aktualisiert werden." #, python-format msgid "JobBinary id '%s' not found" msgstr "JobBinary-ID '%s' wurde nicht gefunden" #, python-format msgid "JobBinary id '%s' not found!" msgstr "JobBinary ID '%s' nicht gefunden!" msgid "JobBinary is referenced and cannot be deleted" msgstr "JobBinary wird referenziert und kann nicht gelöscht werden" #, python-format msgid "JobBinaryInternal id '%s' not found!" msgstr "JobBinaryInternal ID '%s' nicht gefunden!" #, python-format msgid "JobBinaryInternal with id '%s' doesn't exist" msgstr "JobBinaryInternal mit der ID '%s' existiert nicht" #, python-format msgid "JobExecution id '%s' not found!" msgstr "JobExecution ID '%s' nicht gefunden!" msgid "KDC installation failed by reason: {reason}" msgstr "" "Die KDC-Installation ist aufgrund eines Fehlers fehlgeschlagen: {reason}" #, python-format msgid "" "Label '%s' can't be updated because it's not available for plugin or its " "version" msgstr "" "Das Label '%s' kann nicht aktualisiert werden, da es für das Plugin oder " "dessen Version nicht verfügbar ist" #, python-format msgid "Label '%s' can't be updated because it's not mutable" msgstr "" "Das Label '%s' kann nicht aktualisiert werden, da es nicht änderbar ist" msgid "" "Locations of positional arguments must be an unbroken integer sequence " "ascending from 0." 
msgstr "" "Positionen von Positionsargumenten müssen eine ununterbrochene Ganzzahlfolge " "sein, die von 0 aufsteigend ist." #, python-format msgid "Malformed message body: %(reason)s" msgstr "Fehlerhafter Nachrichtentext: %(reason)s" msgid "Manila url must not be empty" msgstr "Manila URL darf nicht leer sein" msgid "Manila url netloc must be a uuid" msgstr "Manila url netloc muss eine UUID sein" msgid "Manila url path must not be empty" msgstr "Der Manila-URL-Pfad darf nicht leer sein" msgid "Manila url scheme must be 'manila'" msgstr "Manila-URL-Schema muss 'manila' sein" msgid "MapR FS url must not be empty" msgstr "Die MapR FS-URL darf nicht leer sein" msgid "Mount volumes to {inst_name} instance" msgstr "Hänge Datenträger an Instanz {inst_name} an" msgid "Multiple shares cannot be mounted to the same path." msgstr "" "Mehrere Freigaben können nicht auf demselben Pfad bereitgestellt werden." msgid "Name already exists" msgstr "Name existiert bereits" msgid "Name must be unique within the interface for any job." msgstr "Name muss innerhalb der Schnittstelle für jeden Job eindeutig sein." #, python-format msgid "NameNode High Availability: %s" msgstr "NameNode Hochverfügbarkeit: %s" #, python-format msgid "Network %s not found" msgstr "Netzwerk %s wurde nicht gefunden" #, python-format msgid "Neutron router corresponding to network %s is not found" msgstr "Neutron Router entsprechend Netzwerk %s wurde nicht gefunden" msgid "No credentials provided for Swift" msgstr "Für Swift wurden keine Anmeldeinformationen angegeben" #, python-format msgid "Node Group %s is missing 'floating_ip_pool' field" msgstr "In der Knotengruppe %s fehlt das Feld 'floating_ip_pool'" #, python-format msgid "Node Group Template id '%s' not found!" msgstr "Knotengruppe Vorlagen-ID '%s' nicht gefunden!" #, python-format msgid "Node Group id '%s' not found!" msgstr "Knotengruppen-ID '%s' nicht gefunden!" #, python-format msgid "" "Node group template %(template)s is in use by cluster templates: %(users)s; " "and clusters: %(clusters)s" msgstr "" "Die Knotengruppenvorlage %(template)s wird von Clustervorlagen verwendet: " "%(users)s; und Cluster: %(clusters)s" #, python-format msgid "NodeGroup template with id '%s' not found" msgstr "NodeGroup-Vorlage mit der ID '%s' wurde nicht gefunden" #, python-format msgid "NodeGroup template with name '%s' already exists" msgstr "Die NodeGroup-Vorlage mit dem Namen '%s' existiert bereits" #, python-format msgid "" "NodeGroupTemplate id '%s' can not be updated. It is referenced by an " "existing cluster." msgstr "" "NodeGroupTemplate-ID '%s' kann nicht aktualisiert werden. Es wird von einem " "vorhandenen Cluster referenziert." 
#, python-format msgid "NodeGroupTemplate id '%s' not found" msgstr "NodeGroupTemplate-ID '%s' wurde nicht gefunden" msgid "Non-dict and non-empty kwargs passed to render" msgstr "Nicht diktierte und nicht leere Kwargs wurden zum Rendern übergeben" #, python-format msgid "Nova availability zone '%s' not found" msgstr "Die Nova-Verfügbarkeitszone '%s' wurde nicht gefunden" #, python-format msgid "" "Number of specific instances (%(instance)s) to delete can not be greater " "than the count difference (%(count)s during scaling" msgstr "" "Die Anzahl der zu löschenden spezifischen Instanzen (%(instance)s) darf " "nicht größer sein als die Zähldifferenz (%(count)s während der Skalierung)" #, python-format msgid "Object '%s' could not be updated" msgstr "Das Objekt '%s' konnte nicht aktualisiert werden" #, python-format msgid "Object '%s' is not found" msgstr "Das Objekt '%s' wurde nicht gefunden" msgid "Object was not created" msgstr "Objekt wurde nicht erstellt" msgid "Object was not deleted" msgstr "Objekt wurde nicht gelöscht" #, python-format msgid "Object with %s not found" msgstr "Objekt mit %s nicht gefunden" #, python-format msgid "" "Only mapping types %(mapping_types)s are allowed for job type %(job_type)s." msgstr "" "Für den Jobtyp %(job_type)s sind nur Mapping-Typen %(mapping_types)s " "zulässig." msgid "Operation" msgstr "Betrieb" #, python-format msgid "" "Operation %(operation)s wasn't executed correctly after %(attempts)d attempts" msgstr "" "Operation %(operation)s wurde nach %(attempts)d Versuchen nicht korrekt " "ausgeführt" msgid "Operation was not canceled" msgstr "Die Operation wurde nicht abgebrochen" msgid "Operation was not suspended" msgstr "Die Operation wurde nicht unterbrochen" #, python-format msgid "Operation with name '%s'" msgstr "Operation mit dem Namen '%s'" #, python-format msgid "Option '%(option)s' is required for config group '%(group)s'" msgstr "" "Option '%(option)s' ist für die Konfigurationsgruppe '%(group)s' erforderlich" msgid "Package Installation" msgstr "Paketinstallation" msgid "Password is not provided in credentials for Swift" msgstr "In den Anmeldedaten für Swift ist kein Passwort angegeben" msgid "Plugin" msgstr "Plugin" #, python-format msgid "Plugin %s is not enabled" msgstr "Plugin %s ist nicht aktiviert" #, python-format msgid "Plugin doesn't contain applicable target '%s'" msgstr "Plugin enthält kein anwendbares Ziel '%s'" #, python-format msgid "Plugin doesn't support the following node processes: %s" msgstr "Das Plugin unterstützt die folgenden Knotenprozesse nicht: %s" #, python-format msgid "Plugin name '%s' not found!" msgstr "Der Plugin-Name '%s' wurde nicht gefunden!" msgid "Plugin version" msgstr "Plugin-Version" #, python-format msgid "Plugin with name '%s' already exists." msgstr "Ein Plugin mit dem Namen '%s' existiert bereits." #, python-format msgid "" "Plugin's applicable target '%(target)s' doesn't contain config with name " "'%(name)s'" msgstr "" "Das anwendbare Ziel '%(target)s' des Plugins enthält keine Konfiguration mit " "dem Namen '%(name)s'" msgid "Plugin: configure cluster" msgstr "Plugin: Cluster konfigurieren" msgid "Plugin: decommission cluster" msgstr "Plugin: Außerbetriebnahme-Cluster" msgid "Plugin: scale cluster" msgstr "Plugin: Skalierungscluster" msgid "Plugin: shutdown cluster" msgstr "Plugin: Herunterfahren des Clusters" msgid "Plugin: start cluster" msgstr "Plugin: Cluster starten" msgid "" "Positional arguments must be given default values if they are not required." 
msgstr "" "Positionsargumente müssen Standardwerte erhalten, wenn sie nicht benötigt " "werden." msgid "Preparing policy files" msgstr "Vorbereiten von Richtliniendateien" msgid "Private key file hasn't been created" msgstr "Private Schlüsseldatei wurde nicht erstellt" #, python-format msgid "Provided input and output DataSources reference the same location: %s" msgstr "" "Bereitgestellte Eingabe- und Ausgabedatenquellen verweisen auf den gleichen " "Speicherort: %s" msgid "Proxy domain requested but not specified." msgstr "Proxy-Domäne angefordert, aber nicht angegeben." msgid "Public key file hasn't been created" msgstr "Die öffentliche Schlüsseldatei wurde nicht erstellt" #, python-format msgid "" "Quota exceeded for %(resource)s: Requested %(requested)s, but available " "%(available)s" msgstr "" "Quote überschritten für %(resource)s: Angeforderte %(requested)s, aber " "verfügbar %(available)s" msgid "RAM" msgstr "RAM" #, python-format msgid "Reading file \"%s\"" msgstr "Datei '%s' lesen" msgid "Remote conductor isn't implemented yet." msgstr "Remote Conductor ist noch nicht implementiert." msgid "" "Remote driver is not loaded. Most probably you see this error because you " "are running Sahara in distributed mode and it is broken.Try running sahara-" "all instead." msgstr "" "Der Remote-Treiber wurde nicht geladen. Höchstwahrscheinlich sehen Sie " "diesen Fehler, weil Sie Sahara im verteilten Modus ausführen und es kaputt " "ist. Versuchen Sie stattdessen, sahara-all auszuführen." msgid "Repository Update" msgstr "Repository-Aktualisierung" msgid "" "Requested RANDSTR length is too long, please choose a value less than 1024." msgstr "" "Angeforderte RANDSTR-Länge ist zu lang, bitte wählen Sie einen Wert kleiner " "als 1024." msgid "Requested RANDSTR length must be positive." msgstr "Die angeforderte RANDSTR-Länge muss positiv sein." #, python-format msgid "Requested flavor '%s' not found" msgstr "Angeforderte Variante '%s' nicht gefunden" #, python-format msgid "Requested image '%(image)s' doesn't contain required tags: %(tags)s" msgstr "" "Angefordertes Bild '%(image)s' enthält keine erforderlichen Tags: %(tags)s" #, python-format msgid "Requested image '%s' is not registered" msgstr "Angefordertes Abbild '%s' ist nicht registriert" #, python-format msgid "Requested keypair '%s' not found" msgstr "Angefordertes Schlüsselpaar '%s' wurde nicht gefunden" #, python-format msgid "Requested plugin '%(name)s' doesn't support version '%(version)s'" msgstr "" "Angefordertes Plugin '%(name)s' unterstützt die Version '%(version)s' nicht" #, python-format msgid "Requested plugin '%s' doesn't support cluster scaling feature" msgstr "" "Das angeforderte Plugin '%s' unterstützt keine Cluster-Skalierungsfunktion" #, python-format msgid "" "Requested plugin '%s' doesn't support converting config files to cluster " "templates" msgstr "" "Das angeforderte Plugin '%s' unterstützt keine Konvertierung von " "Konfigurationsdateien in Cluster-Vorlagen" #, python-format msgid "" "Requested share id %(id)s is of type %(type)s, which is not supported by " "Sahara." msgstr "" "Angeforderte Share-ID %(id)s hat den Typ %(type)s, der von Sahara nicht " "unterstützt wird." #, python-format msgid "Requested share id %s does not exist." msgstr "Angeforderte Freigabenummer %s ist nicht vorhanden." #, python-format msgid "ResourceManager High Availability: %s" msgstr "ResourceManager Hochverfügbarkeit: %s" msgid "Response {response} is not unique for this query {query}." 
msgstr "Antwort {response} ist nicht eindeutig für diese Abfrage {query}." msgid "S3 url must not be empty" msgstr "S3 URL darf nicht leer sein" #, python-format msgid "Sahara doesn't contain plugin with name '%s'" msgstr "Sahara enthält kein Plugin mit dem Namen '%s'" msgid "Sahara internal db is disabled for storing job binaries." msgstr "Sahara internal db ist zum Speichern von Job-Binärdateien deaktiviert." msgid "Scaling cluster failed for the following reason(s): {reason}" msgstr "Skalierungscluster ist aus folgenden Gründen fehlgeschlagen: {reason}" msgid "Scheduled job must specify start time" msgstr "Der geplante Job muss die Startzeit angeben" #, python-format msgid "Script %s not found in any resource roots." msgstr "Script %s wurde in keinem Ressourcenstamm gefunden." #, python-format msgid "Security group '%s' not found" msgstr "Die Sicherheitsgruppe '%s' wurde nicht gefunden" msgid "Server IP of KDC server when using existing KDC" msgstr "Server-IP des KDC-Servers bei Verwendung des vorhandenen KDC" msgid "Server or server ip are not provided" msgstr "Server- oder Server-IP wird nicht bereitgestellt" #, python-format msgid "Session for %(host)s:%(port)s not cached" msgstr "Sitzung für %(host)s:%(port)s nicht zwischengespeichert" msgid "Session type {type} not recognized" msgstr "Sitzungstyp {type} nicht erkannt" msgid "Setting Up Kerberos clients" msgstr "Einrichten von Kerberos-Clients" msgid "Setting up keytabs for users" msgstr "Keytabs für Benutzer einrichten" #, python-format msgid "Share with id %s was not found." msgstr "Freigabe mit ID %s wurde nicht gefunden." #, python-format msgid "Size of S3 object (%(size)sKB) is greater than maximum (%(maximum)sKB)" msgstr "" "Größe des S3-Objekts (%(size)sKB) ist größer als Maximum (%(maximum)sKB)" #, python-format msgid "Size of data (%(size)s) is greater than maximum (%(maximum)s)" msgstr "Größe der Daten (%(size)s) ist größer als Maximum (%(maximum)s)" #, python-format msgid "" "Size of internal binary (%(size)sKB) is greater than the maximum " "(%(maximum)sKB)" msgstr "" "Größe der internen Binärzahl (%(size)sKB) ist größer als die maximale " "%(maximum)sKB)" #, python-format msgid "" "Size of swift object (%(size)sKB) is greater than maximum (%(maximum)sKB)" msgstr "" "Größe des swift Objekts (%(size)sKB) ist größer als Maximum (%(maximum)sKB)" #, python-format msgid "" "Spark job execution failed. Exit status = %(status)s, stdout = %(stdout)s" msgstr "" "Die Ausführung des Spark-Jobs ist fehlgeschlagen. Exit status = %(status)s, " "stdout = %(stdout)s" msgid "Start the following process(es): {process}" msgstr "Starten Sie den/die folgenden Prozess(e): {process}" #, python-format msgid "" "Storm job execution failed. Exit status = %(status)s, stdout = %(stdout)s" msgstr "" "Storm-Jobausführung fehlgeschlagen. 
Exit status = %(status)s, stdout = " "%(stdout)s" msgid "Subprocess execution has failed" msgstr "Subprozessausführung ist fehlgeschlagen" msgid "" "Suspending operation can not be performed on an inactive or non-existent " "cluster" msgstr "" "Der Aussetzungsvorgang kann nicht für einen inaktiven oder nicht vorhandenen " "Cluster ausgeführt werden" msgid "Suspending operation can not be performed on status: {status}" msgstr "Der Aussetzvorgang kann nicht im Status ausgeführt werden: {status}" msgid "Swift url must not be empty" msgstr "Swift URL darf nicht leer sein" msgid "System error has occurred" msgstr "Systemfehler ist aufgetreten" msgid "Terminating cluster failed for the following reason(s): {reason}" msgstr "" "Das Beenden des Clusters ist aus folgenden Gründen fehlgeschlagen: {reason}" msgid "" "The combination of mapping type and location must be unique within the " "interface for any job." msgstr "" "Die Kombination aus Mapping-Typ und Standort muss innerhalb der " "Schnittstelle für jeden Job eindeutig sein." msgid "The configuration has failed" msgstr "Die Konfiguration ist fehlgeschlagen" msgid "" "The filesystem to mount as the root volume on the image. No value is " "required if only one filesystem is detected." msgstr "" "Das Dateisystem, das als Root-Volume im Abbild bereitgestellt werden soll. " "Kein Wert wird benötigt, wenn nur ein Dateisystem gefunden wird." msgid "The following argument names are reserved: {names}" msgstr "Die folgenden Argumentnamen sind reserviert: {names}" msgid "The name of realm to be used" msgstr "Der Name des zu verwendenden Bereichs" msgid "The object is in an incorrect state" msgstr "Das Objekt befindet sich in einem falschen Status" msgid "" "The path to an image to modify. This image will be modified in-place: be " "sure to target a copy if you wish to maintain a clean master image." msgstr "" "Der Pfad zu einem Abbild, das geändert werden soll. Dieses Bild wird direkt " "geändert: Achten Sie darauf, eine Kopie zu wählen, wenn Sie ein sauberes " "Master-Abbild beibehalten möchten." #, python-format msgid "" "The url for JobBinary Id '%s' can not be updated because it is an internal-" "db url." msgstr "" "Die URL für die JobBinary-ID '%s' kann nicht aktualisiert werden, da es sich " "um eine interne-db-URL handelt." 
msgid "The version you are trying to use is deprecated" msgstr "Die Version, die Sie verwenden möchten, ist veraltet" msgid "This API operation isn't implemented" msgstr "Diese API-Operation ist nicht implementiert" msgid "" "To work with JobBinary located in internal swift add 'user' and 'password' " "to extra" msgstr "" "Um mit JobBinary im internen swift zu arbeiten, fügen Sie 'user' und " "'password' hinzu" msgid "Token tenant != requested tenant" msgstr "Token-Mandant != Angeforderter Mandant" msgid "URL for binary in S3 must specify an object not a bucket" msgstr "" "Die URL für die Binärdatei in S3 muss ein Objekt und keinen Bucket angeben" #, python-format msgid "URL for binary in S3 must start with %s" msgstr "Die URL für die Binärdatei in S3 muss mit %s beginnen" #, python-format msgid "URL must be of the form swift://container%s/object" msgstr "Die URL muss das Format 'swift://container%s/object' haben" msgid "URL scheme must be 'hdfs'" msgstr "Das URL-Schema muss 'hdfs' lauten" msgid "URL scheme must be 'internal-db'" msgstr "Das URL-Schema muss 'internal-db' sein" msgid "URL scheme must be 'maprfs'" msgstr "Das URL-Schema muss 'maprfs' lauten" msgid "URL scheme must be 's3' or 's3a'" msgstr "Das URL-Schema muss 's3' oder 's3a' sein" msgid "URL scheme must be 'swift'" msgstr "Das URL-Schema muss 'swift' sein" #, python-format msgid "Unable to find file %s with compute topology" msgstr "Die Datei %s konnte mit der Computertopologie nicht gefunden werden" msgid "Unable to get kdc server start command" msgstr "Der Startbefehl des kdc-Servers konnte nicht abgerufen werden" #, python-format msgid "Unable to get parameter '%(param_name)s' from service %(service)s" msgstr "" "Der Parameter '%(param_name)s' konnte nicht vom Service %(service)s " "abgerufen werden" msgid "Unable to retrieve config details" msgstr "Die Konfigurationsdetails konnten nicht abgerufen werden" #, python-format msgid "Unexpected results found when searching for domain %s" msgstr "Beim Suchen nach Domäne %s wurden unerwartete Ergebnisse gefunden" #, python-format msgid "Unexpected results found when searching for user %s" msgstr "Beim Suchen nach Benutzer %s wurden unerwartete Ergebnisse gefunden" msgid "Unknown distro: cannot verify or install packages." msgstr "" "Unbekannte Distribution: Pakete können nicht überprüft oder installiert " "werden." #, python-format msgid "Unknown field for sorting %s" msgstr "Unbekanntes Feld zum Sortieren von %s" #, python-format msgid "Unknown file mode %s" msgstr "Unbekannter Dateimodus %s" #, python-format msgid "Unknown plugin version '%(version)s' of %(plugin)s" msgstr "Unbekannte Plugin-Version '%(version)s' von %(plugin)s" #, python-format msgid "Unsupported type: %s" msgstr "Nicht unterstützter Typ: %s" msgid "Updating repository" msgstr "Repository aktualisieren" msgid "Url for binary in internal swift must specify an object not a container" msgstr "" "URL für Binär in internem Swift muss ein Objekt nicht einen Container angeben" #, python-format msgid "Url for binary in internal swift must start with %s" msgstr "URL für Binär in internem Swift muss mit %s beginnen" msgid "User is not provided in credentials for Swift" msgstr "" "Der Benutzer wird in den Anmeldeinformationen für Swift nicht angegeben" msgid "VCPU" msgstr "VCPU" #, python-format msgid "Validator type %s not found." msgstr "Der Validierungstyp %s wurde nicht gefunden." #, python-format msgid "Value '%s' is not a valid number." msgstr "Der Wert '%s' ist keine gültige Zahl." 
#, python-format msgid "Value '%s' is not a valid string." msgstr "Der Wert '%s' ist keine gültige Zeichenfolge." msgid "Value for argument {name} must be one of {choices}." msgstr "Der Wert für Argument {name} muss einer von {choices} sein." #, python-format msgid "Verification id '%s' not found!" msgstr "Überprüfungs-ID '%s' wurde nicht gefunden!" #, python-format msgid "Version %(version)s of plugin %(plugin)s is not enabled" msgstr "Version %(version)s des Plugins %(plugin)s ist nicht aktiviert" #, python-format msgid "" "Volume size: %(volume_size)s GB should be greater than value of " "\"dfs_datanode_du_reserved\": %(reserved)s GB" msgstr "" "Volume size: %(volume_size)s GB sollte größer sein als der Wert von " "'dfs_datanode_du_reserved': %(reserved)s GB" #, python-format msgid "Volume type '%s' not found" msgstr "Der Datenträger-Typ '%s' wurde nicht gefunden" msgid "Wait for instance accessibility" msgstr "Warten Sie auf Barrierefreiheit" #, python-format msgid "Was not able to find compute node topology for VM %s" msgstr "Es konnte keine Rechenknotentopologie für VM%s gefunden werden" #, python-format msgid "Writing file \"%s\"" msgstr "Datei '%s' schreiben" #, python-format msgid "Writing files \"%s\"" msgstr "Dateien schreiben '%s'" msgid "You are not authorized to complete this action" msgstr "Sie sind nicht berechtigt, diese Aktion abzuschließen" msgid "" "You can only reference instances by Name or UUID, not both on the same " "request" msgstr "" "Sie können Instanzen nur nach Name oder UUID referenzieren, nicht beide auf " "derselben Anfrage" msgid "You must provide a revision or relative delta" msgstr "Sie müssen eine Revision oder ein relatives Delta angeben" #, python-format msgid "You must specify a %s value for your plugin_name" msgstr "Sie müssen einen %s-Wert für Ihren plugin_name angeben" msgid "You must specify a volumes_size parameter" msgstr "Sie müssen einen volumes_size-Parameter angeben" msgid "cannot understand JSON" msgstr "kann JSON nicht verstehen" msgid "floating ip" msgstr "Floating IP" msgid "instance" msgstr "Beispiel" msgid "port" msgstr "port" msgid "security group" msgstr "Sicherheitsgruppe" msgid "security group rule" msgstr "Sicherheitsgruppenregel" msgid "volume" msgstr "Datenträger" msgid "volume storage" msgstr "Datenträgerspeicher" #, python-format msgid "" "{object} with id '%s' could not be updated because it wasn't created in this " "tenant" msgstr "" "{object} mit der ID '%s' konnte nicht aktualisiert werden, da es in diesem " "Mandanten nicht erstellt wurde" #, python-format msgid "" "{object} with id '%s' could not be updated because it's marked as protected" msgstr "" "{object} mit der ID '%s' konnte nicht aktualisiert werden, da es als " "geschützt markiert ist" msgid "" "{object} with id '{id}' could not be deleted because it wasn't created in " "this tenant" msgstr "" "{object} mit der ID '{id}' konnte nicht gelöscht werden, da es in diesem " "Mandanten nicht erstellt wurde" msgid "" "{object} with id '{id}' could not be deleted because it's marked as protected" msgstr "" "{object} mit der ID '{id}' konnte nicht gelöscht werden, da es als geschützt " "markiert ist" msgid "{plugin} version {version}" msgstr "{plugin} Version {version}" msgid "{} {}" msgstr "{} {}" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/main.py0000664000175000017500000001142100000000000015614 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import os from oslo_config import cfg from oslo_log import log from oslo_service import service as oslo_service from oslo_service import sslutils from oslo_service import wsgi as oslo_wsgi import stevedore from sahara.api import acl from sahara.common import config as common_config from sahara import config from sahara import context from sahara.plugins import base as plugins_base from sahara.service import api from sahara.service.castellan import config as castellan from sahara.service.edp.data_sources import manager as ds_manager from sahara.service.edp.job_binaries import manager as jb_manager from sahara.service import ops as service_ops from sahara.service import periodic from sahara.utils.openstack import cinder from sahara.utils.openstack import keystone from sahara.utils import remote from sahara.utils import rpc as messaging LOG = log.getLogger(__name__) opts = [ cfg.StrOpt('os_region_name', help='Region name used to get services endpoints.'), cfg.StrOpt('remote', default='ssh', help='A method for Sahara to execute commands ' 'on VMs.'), cfg.IntOpt('api_workers', default=1, help="Number of workers for Sahara API service (0 means " "all-in-one-thread configuration)."), ] INFRASTRUCTURE_ENGINE = 'heat' CONF = cfg.CONF CONF.register_opts(opts) class SaharaWSGIService(oslo_wsgi.Server): def __init__(self, service_name, app): super(SaharaWSGIService, self).__init__( CONF, service_name, app, host=CONF.host, port=CONF.port, use_ssl=sslutils.is_enabled(CONF)) def setup_common(possible_topdir, service_name): dev_conf = os.path.join(possible_topdir, 'etc', 'sahara', 'sahara.conf') config_files = None if os.path.exists(dev_conf): config_files = [dev_conf] config.parse_configs(config_files) common_config.set_config_defaults() log.setup(CONF, "sahara") # Validate other configurations (that may produce logs) here cinder.validate_config() keystone.validate_config() validate_castellan_config() messaging.setup(service_name) plugins_base.setup_plugins() ds_manager.setup_data_sources() jb_manager.setup_job_binaries() LOG.info('Sahara {service} started'.format(service=service_name)) def validate_castellan_config(): with admin_context(): castellan.validate_config() def setup_sahara_api(mode): ops = _get_ops_driver(mode) api.setup_api(ops) def setup_sahara_engine(): periodic.setup() engine = _get_infrastructure_engine() service_ops.setup_ops(engine) remote_driver = _get_remote_driver() remote.setup_remote(remote_driver, engine) def setup_auth_policy(): acl.setup_policy() def make_app(): app_loader = oslo_wsgi.Loader(CONF) return app_loader.load_app("sahara") def _load_driver(namespace, name): extension_manager = stevedore.DriverManager( namespace=namespace, name=name, invoke_on_load=True ) LOG.info("Driver {name} successfully loaded".format(name=name)) return extension_manager.driver def _get_infrastructure_engine(): """Import and return one of sahara.service.*_engine.py modules.""" LOG.debug("Infrastructure engine {engine} is loading".format( 
engine=INFRASTRUCTURE_ENGINE)) return _load_driver('sahara.infrastructure.engine', INFRASTRUCTURE_ENGINE) def _get_remote_driver(): LOG.debug("Remote {remote} is loading".format(remote=CONF.remote)) return _load_driver('sahara.remote', CONF.remote) def _get_ops_driver(driver_name): LOG.debug("Ops {driver} is loading".format(driver=driver_name)) return _load_driver('sahara.run.mode', driver_name) def get_process_launcher(): return oslo_service.ProcessLauncher(CONF, restart_method='mutate') def launch_api_service(launcher, service): launcher.launch_service(service, workers=CONF.api_workers) service.start() launcher.wait() @contextlib.contextmanager def admin_context(): ctx = context.get_admin_context() context.set_ctx(ctx) try: yield finally: context.set_ctx(None) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.713891 sahara-16.0.0/sahara/plugins/0000775000175000017500000000000000000000000016000 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/__init__.py0000664000175000017500000000000000000000000020077 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/base.py0000664000175000017500000001125400000000000017267 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc from oslo_config import cfg from oslo_log import log as logging import six from stevedore import enabled from sahara import conductor as cond from sahara import exceptions as ex from sahara.i18n import _ from sahara.plugins import labels from sahara.utils import resources conductor = cond.API LOG = logging.getLogger(__name__) CONF = cfg.CONF def required(fun): return abc.abstractmethod(fun) def required_with_default(fun): return fun def optional(fun): fun.__not_implemented__ = True return fun @six.add_metaclass(abc.ABCMeta) class PluginInterface(resources.BaseResource): __resource_name__ = 'plugin' name = 'plugin_interface' @required def get_title(self): """Plugin title For example: "Vanilla Provisioning" """ pass @required_with_default def get_description(self): """Optional description of the plugin This information is targeted to be displayed in UI. 
""" pass def to_dict(self): return { 'name': self.name, 'title': self.get_title(), 'description': self.get_description(), } class PluginManager(object): def __init__(self): self.plugins = {} self.default_label_schema = {} self._load_cluster_plugins() self.label_handler = labels.LabelHandler(self.plugins) def _load_cluster_plugins(self): config_plugins = CONF.plugins extension_manager = enabled.EnabledExtensionManager( check_func=lambda ext: ext.name in config_plugins, namespace='sahara.cluster.plugins', invoke_on_load=True ) for ext in extension_manager.extensions: if ext.name in self.plugins: raise ex.ConfigurationError( _("Plugin with name '%s' already exists.") % ext.name) ext.obj.name = ext.name self.plugins[ext.name] = ext.obj LOG.info("Plugin {plugin_name} loaded {entry_point}".format( plugin_name=ext.name, entry_point=ext.entry_point_target)) if len(self.plugins) < len(config_plugins): self.loaded_plugins = set(six.iterkeys(self.plugins)) requested_plugins = set(config_plugins) LOG.warning("Plugins couldn't be loaded: %s", ", ".join(requested_plugins - self.loaded_plugins)) def get_plugins(self, serialized=False): if serialized: return [self.serialize_plugin(name) for name in PLUGINS.plugins] return [self.get_plugin(name) for name in PLUGINS.plugins] def get_plugin(self, plugin_name): return self.plugins.get(plugin_name) def is_plugin_implements(self, plugin_name, fun_name): plugin = self.get_plugin(plugin_name) fun = getattr(plugin, fun_name) if not (fun and callable(fun)): return False return not hasattr(fun, '__not_implemented__') def serialize_plugin(self, plugin_name, version=None): plugin = self.get_plugin(plugin_name) if plugin: res = plugin.as_resource() res._info.update(self.label_handler.get_label_full_details( plugin_name)) if version: if version in plugin.get_versions(): res._info.update(plugin.get_version_details(version)) else: return None return res def update_plugin(self, plugin_name, values): self.label_handler.update_plugin(plugin_name, values) return self.serialize_plugin(plugin_name) def validate_plugin_update(self, plugin_name, values): return self.label_handler.validate_plugin_update(plugin_name, values) def get_plugin_update_validation_jsonschema(self): return self.label_handler.get_plugin_update_validation_jsonschema() def validate_plugin_labels(self, plugin, version): self.label_handler.validate_plugin_labels(plugin, version) PLUGINS = None def setup_plugins(): global PLUGINS PLUGINS = PluginManager() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/castellan_utils.py0000664000175000017500000000163700000000000021547 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from sahara.service.castellan import utils as castellan_utils def delete_secret(id, ctx=None, **kwargs): castellan_utils.delete_secret(id, ctx=ctx) def get_secret(id, ctx=None, **kwargs): return castellan_utils.get_secret(id, ctx=ctx) def store_secret(secret, ctx=None, **kwargs): return castellan_utils.store_secret(secret) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/conductor.py0000664000175000017500000000221100000000000020346 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara import conductor conductor = conductor.API def cluster_get(context, cluster_id, **kwargs): return conductor.cluster_get(context, cluster_id) def cluster_update(context, cluster, values, **kwargs): return conductor.cluster_update(context, cluster, values) def cluster_create(context, values, **kwargs): return conductor.cluster_create(context, values) def plugin_create(context, values, **kwargs): return conductor.plugin_create(context, values) def plugin_remove(context, name, **kwargs): return conductor.plugin_remove(context, name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/context.py0000664000175000017500000000423300000000000020040 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
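# NOTE(editor): a minimal, hypothetical sketch -- not part of the original sources --
# tying together the two thin wrapper layers shown above: castellan_utils for secret
# storage and conductor for persisting cluster changes. The helper name and the
# 'extra' key are invented, and it is assumed here that cluster objects expose a
# dict-like 'extra' field.
from sahara.plugins import castellan_utils as plugin_key_manager
from sahara.plugins import conductor as plugin_conductor


def remember_service_password(ctx, cluster, password):
    # Store the raw password in the key manager and persist only the returned
    # secret id on the cluster record.
    secret_id = plugin_key_manager.store_secret(password, ctx=ctx)
    extra = dict(cluster.extra or {})
    extra['example_password_secret_id'] = secret_id
    return plugin_conductor.cluster_update(ctx, cluster, {'extra': extra})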
from sahara import context def ctx(**kwargs): return context.ctx() def set_ctx(new_ctx, **kwargs): context.set_ctx(new_ctx) def has_ctx(**kwargs): return context.has_ctx() def sleep(seconds=0, **kwargs): context.sleep(seconds) def current(**kwargs): return context.current() def set_current_instance_id(instance_id, **kwargs): return context.set_current_instance_id(instance_id) class PluginsThreadGroup(context.ThreadGroup): def __init__(self, thread_pool_size=1000, **kwargs): super(PluginsThreadGroup, self).__init__() class PluginsContext(context.Context): def __init__(self, user_id=None, tenant_id=None, auth_token=None, service_catalog=None, username=None, tenant_name=None, roles=None, is_admin=None, remote_semaphore=None, resource_uuid=None, current_instance_info=None, request_id=None, auth_plugin=None, overwrite=True, **kwargs): super(PluginsContext, self).__init__(auth_token=auth_token, user=user_id, tenant=tenant_id, is_admin=is_admin, resource_uuid=resource_uuid, request_id=request_id, roles=roles, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/db.py0000664000175000017500000000143200000000000016737 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
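# NOTE(editor): illustrative sketch only, not part of the original sources. It uses
# the plugin-facing context helpers shown above (set_current_instance_id() and
# sleep()) to poll an instance until a caller-supplied check passes. The helper,
# its arguments and the retry policy are invented for the example.
from sahara.plugins import context as plugin_context


def wait_for_instance(instance_id, is_ready, attempts=30, delay=10):
    """Poll is_ready() up to 'attempts' times, sleeping 'delay' seconds in between."""
    plugin_context.set_current_instance_id(instance_id)
    for _ in range(attempts):
        if is_ready():
            return True
        # Prefer the context wrapper over a bare time.sleep() so plugin code stays
        # consistent with how the rest of Sahara pauses between retries.
        plugin_context.sleep(delay)
    return False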
from sahara.db import api as db_api def setup_db(**kwargs): db_api.setup_db() def drop_db(**kwargs): db_api.drop_db() def create_facade_lazily(**kwargs): return db_api._create_facade_lazily() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.713891 sahara-16.0.0/sahara/plugins/default_templates/0000775000175000017500000000000000000000000021502 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.597891 sahara-16.0.0/sahara/plugins/default_templates/ambari/0000775000175000017500000000000000000000000022735 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.713891 sahara-16.0.0/sahara/plugins/default_templates/ambari/v2_3/0000775000175000017500000000000000000000000023506 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/ambari/v2_3/cluster.json0000664000175000017500000000123400000000000026062 0ustar00zuulzuul00000000000000{ "plugin_name": "hdp", "hadoop_version": "2.3", "node_groups": [ { "name": "worker", "count": 3, "node_group_template_id": "{hdp-23-default-worker}" }, { "name": "master-edp", "count": 1, "node_group_template_id": "{hdp-23-default-master-edp}" }, { "name": "master", "count": 1, "node_group_template_id": "{hdp-23-default-master}" } ], "name": "hdp-23-default-cluster", "neutron_management_network": "{neutron_management_network}", "cluster_configs": {}, "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/ambari/v2_3/master-edp.json0000664000175000017500000000061700000000000026446 0ustar00zuulzuul00000000000000{ "plugin_name": "ambari", "hadoop_version": "2.3", "node_processes": [ "Hive Metastore", "HiveServer", "Oozie" ], "name": "hdp-23-default-master-edp", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/ambari/v2_3/master.json0000664000175000017500000000107000000000000025672 0ustar00zuulzuul00000000000000{ "plugin_name": "ambari", "hadoop_version": "2.3", "node_processes": [ "Ambari", "MapReduce History Server", "Spark History Server", "NameNode", "ResourceManager", "SecondaryNameNode", "YARN Timeline Server", "ZooKeeper", "Kafka Broker" ], "name": "hdp-23-default-master", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/ambari/v2_3/worker.json0000664000175000017500000000064600000000000025720 0ustar00zuulzuul00000000000000{ "plugin_name": "ambari", "hadoop_version": "2.3", "node_processes": [ "DataNode", "NodeManager" ], "name": "hdp-23-default-worker", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "volume_size": 2, 
"volumes_per_node": 2, "is_protected": true } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.713891 sahara-16.0.0/sahara/plugins/default_templates/ambari/v2_4/0000775000175000017500000000000000000000000023507 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/ambari/v2_4/cluster.json0000664000175000017500000000123700000000000026066 0ustar00zuulzuul00000000000000{ "plugin_name": "ambari", "hadoop_version": "2.4", "node_groups": [ { "name": "worker", "count": 3, "node_group_template_id": "{hdp-24-default-worker}" }, { "name": "master-edp", "count": 1, "node_group_template_id": "{hdp-24-default-master-edp}" }, { "name": "master", "count": 1, "node_group_template_id": "{hdp-24-default-master}" } ], "name": "hdp-24-default-cluster", "neutron_management_network": "{neutron_management_network}", "cluster_configs": {}, "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/ambari/v2_4/master-edp.json0000664000175000017500000000061700000000000026447 0ustar00zuulzuul00000000000000{ "plugin_name": "ambari", "hadoop_version": "2.4", "node_processes": [ "Hive Metastore", "HiveServer", "Oozie" ], "name": "hdp-24-default-master-edp", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/ambari/v2_4/master.json0000664000175000017500000000107100000000000025674 0ustar00zuulzuul00000000000000{ "plugin_name": "ambari", "hadoop_version": "2.4", "node_processes": [ "Ambari", "MapReduce History Server", "Spark History Server", "NameNode", "ResourceManager", "SecondaryNameNode", "YARN Timeline Server", "ZooKeeper", "Kafka Broker" ], "name": "hdp-24-default-master", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/ambari/v2_4/worker.json0000664000175000017500000000064600000000000025721 0ustar00zuulzuul00000000000000{ "plugin_name": "ambari", "hadoop_version": "2.4", "node_processes": [ "DataNode", "NodeManager" ], "name": "hdp-24-default-worker", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "volume_size": 2, "volumes_per_node": 2, "is_protected": true } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.713891 sahara-16.0.0/sahara/plugins/default_templates/ambari/v2_5/0000775000175000017500000000000000000000000023510 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/ambari/v2_5/cluster.json0000664000175000017500000000123700000000000026067 0ustar00zuulzuul00000000000000{ "plugin_name": "ambari", "hadoop_version": "2.5", "node_groups": [ { "name": 
"worker", "count": 3, "node_group_template_id": "{hdp-25-default-worker}" }, { "name": "master-edp", "count": 1, "node_group_template_id": "{hdp-25-default-master-edp}" }, { "name": "master", "count": 1, "node_group_template_id": "{hdp-25-default-master}" } ], "name": "hdp-25-default-cluster", "neutron_management_network": "{neutron_management_network}", "cluster_configs": {}, "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/ambari/v2_5/master-edp.json0000664000175000017500000000061700000000000026450 0ustar00zuulzuul00000000000000{ "plugin_name": "ambari", "hadoop_version": "2.5", "node_processes": [ "Hive Metastore", "HiveServer", "Oozie" ], "name": "hdp-25-default-master-edp", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/ambari/v2_5/master.json0000664000175000017500000000107100000000000025675 0ustar00zuulzuul00000000000000{ "plugin_name": "ambari", "hadoop_version": "2.5", "node_processes": [ "Ambari", "MapReduce History Server", "Spark History Server", "NameNode", "ResourceManager", "SecondaryNameNode", "YARN Timeline Server", "ZooKeeper", "Kafka Broker" ], "name": "hdp-25-default-master", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/ambari/v2_5/worker.json0000664000175000017500000000064600000000000025722 0ustar00zuulzuul00000000000000{ "plugin_name": "ambari", "hadoop_version": "2.5", "node_processes": [ "DataNode", "NodeManager" ], "name": "hdp-25-default-worker", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "volume_size": 2, "volumes_per_node": 2, "is_protected": true } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.597891 sahara-16.0.0/sahara/plugins/default_templates/cdh/0000775000175000017500000000000000000000000022240 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.717891 sahara-16.0.0/sahara/plugins/default_templates/cdh/v5_5_0/0000775000175000017500000000000000000000000023235 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/cdh/v5_5_0/cluster.json0000664000175000017500000000155200000000000025614 0ustar00zuulzuul00000000000000{ "plugin_name": "cdh", "hadoop_version": "5.5.0", "node_groups": [ { "name": "worker-nodemanager-datanode", "count": 3, "node_group_template_id": "{cdh-550-default-nodemanager-datanode}" }, { "name": "manager", "count": 1, "node_group_template_id": "{cdh-550-default-manager}" }, { "name": "master-core", "count": 1, "node_group_template_id": "{cdh-550-default-master-core}" }, { "name": "master-additional", "count": 1, "node_group_template_id": 
"{cdh-550-default-master-additional}" } ], "name": "cdh-550-default-cluster", "neutron_management_network": "{neutron_management_network}", "cluster_configs": {}, "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/cdh/v5_5_0/manager.json0000664000175000017500000000056500000000000025550 0ustar00zuulzuul00000000000000{ "plugin_name": "cdh", "hadoop_version": "5.5.0", "node_processes": [ "CLOUDERA_MANAGER", "KMS" ], "name": "cdh-550-default-manager", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/cdh/v5_5_0/master-additional.json0000664000175000017500000000103400000000000027527 0ustar00zuulzuul00000000000000{ "plugin_name": "cdh", "hadoop_version": "5.5.0", "node_processes": [ "OOZIE_SERVER", "YARN_JOBHISTORY", "YARN_NODEMANAGER", "HDFS_SECONDARYNAMENODE", "HIVE_METASTORE", "HIVE_SERVER2", "SPARK_YARN_HISTORY_SERVER" ], "name": "cdh-550-default-master-additional", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/cdh/v5_5_0/master-core.json0000664000175000017500000000073000000000000026351 0ustar00zuulzuul00000000000000{ "plugin_name": "cdh", "hadoop_version": "5.5.0", "node_processes": [ "HDFS_NAMENODE", "YARN_RESOURCEMANAGER", "SENTRY_SERVER", "YARN_NODEMANAGER", "ZOOKEEPER_SERVER" ], "name": "cdh-550-default-master-core", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/cdh/v5_5_0/worker-nm-dn.json0000664000175000017500000000067400000000000026457 0ustar00zuulzuul00000000000000{ "plugin_name": "cdh", "hadoop_version": "5.5.0", "node_processes": [ "HDFS_DATANODE", "YARN_NODEMANAGER" ], "name": "cdh-550-default-nodemanager-datanode", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "volume_size": 2, "volumes_per_node": 2, "is_protected": true } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.717891 sahara-16.0.0/sahara/plugins/default_templates/cdh/v5_7_0/0000775000175000017500000000000000000000000023237 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/cdh/v5_7_0/cluster.json0000664000175000017500000000155200000000000025616 0ustar00zuulzuul00000000000000{ "plugin_name": "cdh", "hadoop_version": "5.7.0", "node_groups": [ { "name": "worker-nodemanager-datanode", "count": 3, "node_group_template_id": "{cdh-570-default-nodemanager-datanode}" }, { "name": "manager", "count": 1, "node_group_template_id": "{cdh-570-default-manager}" 
}, { "name": "master-core", "count": 1, "node_group_template_id": "{cdh-570-default-master-core}" }, { "name": "master-additional", "count": 1, "node_group_template_id": "{cdh-570-default-master-additional}" } ], "name": "cdh-570-default-cluster", "neutron_management_network": "{neutron_management_network}", "cluster_configs": {}, "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/cdh/v5_7_0/manager.json0000664000175000017500000000056500000000000025552 0ustar00zuulzuul00000000000000{ "plugin_name": "cdh", "hadoop_version": "5.7.0", "node_processes": [ "CLOUDERA_MANAGER", "KMS" ], "name": "cdh-570-default-manager", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/cdh/v5_7_0/master-additional.json0000664000175000017500000000103400000000000027531 0ustar00zuulzuul00000000000000{ "plugin_name": "cdh", "hadoop_version": "5.7.0", "node_processes": [ "OOZIE_SERVER", "YARN_JOBHISTORY", "YARN_NODEMANAGER", "HDFS_SECONDARYNAMENODE", "HIVE_METASTORE", "HIVE_SERVER2", "SPARK_YARN_HISTORY_SERVER" ], "name": "cdh-570-default-master-additional", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/cdh/v5_7_0/master-core.json0000664000175000017500000000073000000000000026353 0ustar00zuulzuul00000000000000{ "plugin_name": "cdh", "hadoop_version": "5.7.0", "node_processes": [ "HDFS_NAMENODE", "YARN_RESOURCEMANAGER", "SENTRY_SERVER", "YARN_NODEMANAGER", "ZOOKEEPER_SERVER" ], "name": "cdh-570-default-master-core", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/cdh/v5_7_0/worker-nm-dn.json0000664000175000017500000000067400000000000026461 0ustar00zuulzuul00000000000000{ "plugin_name": "cdh", "hadoop_version": "5.7.0", "node_processes": [ "HDFS_DATANODE", "YARN_NODEMANAGER" ], "name": "cdh-570-default-nodemanager-datanode", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "volume_size": 2, "volumes_per_node": 2, "is_protected": true } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.717891 sahara-16.0.0/sahara/plugins/default_templates/cdh/v5_9_0/0000775000175000017500000000000000000000000023241 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/cdh/v5_9_0/cluster.json0000664000175000017500000000155200000000000025620 0ustar00zuulzuul00000000000000{ "plugin_name": "cdh", "hadoop_version": "5.9.0", "node_groups": [ { "name": "worker-nodemanager-datanode", 
"count": 3, "node_group_template_id": "{cdh-590-default-nodemanager-datanode}" }, { "name": "manager", "count": 1, "node_group_template_id": "{cdh-590-default-manager}" }, { "name": "master-core", "count": 1, "node_group_template_id": "{cdh-590-default-master-core}" }, { "name": "master-additional", "count": 1, "node_group_template_id": "{cdh-590-default-master-additional}" } ], "name": "cdh-590-default-cluster", "neutron_management_network": "{neutron_management_network}", "cluster_configs": {}, "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/cdh/v5_9_0/manager.json0000664000175000017500000000056500000000000025554 0ustar00zuulzuul00000000000000{ "plugin_name": "cdh", "hadoop_version": "5.9.0", "node_processes": [ "CLOUDERA_MANAGER", "KMS" ], "name": "cdh-590-default-manager", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/cdh/v5_9_0/master-additional.json0000664000175000017500000000103400000000000027533 0ustar00zuulzuul00000000000000{ "plugin_name": "cdh", "hadoop_version": "5.9.0", "node_processes": [ "OOZIE_SERVER", "YARN_JOBHISTORY", "YARN_NODEMANAGER", "HDFS_SECONDARYNAMENODE", "HIVE_METASTORE", "HIVE_SERVER2", "SPARK_YARN_HISTORY_SERVER" ], "name": "cdh-590-default-master-additional", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/cdh/v5_9_0/master-core.json0000664000175000017500000000073000000000000026355 0ustar00zuulzuul00000000000000{ "plugin_name": "cdh", "hadoop_version": "5.9.0", "node_processes": [ "HDFS_NAMENODE", "YARN_RESOURCEMANAGER", "SENTRY_SERVER", "YARN_NODEMANAGER", "ZOOKEEPER_SERVER" ], "name": "cdh-590-default-master-core", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/cdh/v5_9_0/worker-nm-dn.json0000664000175000017500000000067400000000000026463 0ustar00zuulzuul00000000000000{ "plugin_name": "cdh", "hadoop_version": "5.9.0", "node_processes": [ "HDFS_DATANODE", "YARN_NODEMANAGER" ], "name": "cdh-590-default-nodemanager-datanode", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "volume_size": 2, "volumes_per_node": 2, "is_protected": true } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.597891 sahara-16.0.0/sahara/plugins/default_templates/mapr/0000775000175000017500000000000000000000000022441 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.717891 
sahara-16.0.0/sahara/plugins/default_templates/mapr/5_0_0_mrv2/0000775000175000017500000000000000000000000024211 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/mapr/5_0_0_mrv2/cluster.json0000664000175000017500000000103700000000000026566 0ustar00zuulzuul00000000000000{ "plugin_name": "mapr", "hadoop_version": "5.0.0.mrv2", "node_groups": [ { "name": "master", "count": 1, "node_group_template_id": "{mapr-500mr2-default-master}" }, { "name": "worker", "count": 3, "node_group_template_id": "{mapr-500mr2-default-worker}" } ], "name": "mapr-500mr2-default-cluster", "neutron_management_network": "{neutron_management_network}", "cluster_configs": {}, "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/mapr/5_0_0_mrv2/master.json0000664000175000017500000000074700000000000026407 0ustar00zuulzuul00000000000000{ "plugin_name": "mapr", "hadoop_version": "5.0.0.mrv2", "node_processes": [ "ZooKeeper", "Webserver", "CLDB", "FileServer", "ResourceManager", "HistoryServer", "Oozie" ], "name": "mapr-500mr2-default-master", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/mapr/5_0_0_mrv2/worker.json0000664000175000017500000000057700000000000026426 0ustar00zuulzuul00000000000000{ "plugin_name": "mapr", "hadoop_version": "5.0.0.mrv2", "node_processes": [ "FileServer", "NodeManager" ], "name": "mapr-500mr2-default-worker", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.717891 sahara-16.0.0/sahara/plugins/default_templates/mapr/v5_1_0_mrv2/0000775000175000017500000000000000000000000024400 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/mapr/v5_1_0_mrv2/cluster.json0000664000175000017500000000104000000000000026747 0ustar00zuulzuul00000000000000{ "plugin_name": "mapr", "hadoop_version": "5.1.0.mrv2", "node_groups": [ { "name": "master", "count": 1, "node_group_template_id": "{mapr-510mr2-default-master}" }, { "name": "worker", "count": 3, "node_group_template_id": "{mapr-510mr2-default-worker}" } ], "name": "mapr-510mr2-default-cluster", "neutron_management_network": "{neutron_management_network}", "cluster_configs": {}, "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/mapr/v5_1_0_mrv2/master.json0000664000175000017500000000124500000000000026570 0ustar00zuulzuul00000000000000{ "plugin_name": "mapr", "hadoop_version": "5.1.0.mrv2", "node_processes": [ "Metrics", "Webserver", "ZooKeeper", "HTTPFS", "Oozie", "FileServer", "CLDB", "Flume", "Hue", "NodeManager", "HistoryServer", "ResourceManager", "HiveServer2", "HiveMetastore", "Sqoop2-Client", "Sqoop2-Server" ], "name": 
"mapr-510mr2-default-master", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/mapr/v5_1_0_mrv2/worker.json0000664000175000017500000000057700000000000026615 0ustar00zuulzuul00000000000000{ "plugin_name": "mapr", "hadoop_version": "5.1.0.mrv2", "node_processes": [ "FileServer", "NodeManager" ], "name": "mapr-510mr2-default-worker", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.717891 sahara-16.0.0/sahara/plugins/default_templates/mapr/v5_2_0_mrv2/0000775000175000017500000000000000000000000024401 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/mapr/v5_2_0_mrv2/cluster.json0000664000175000017500000000104000000000000026750 0ustar00zuulzuul00000000000000{ "plugin_name": "mapr", "hadoop_version": "5.2.0.mrv2", "node_groups": [ { "name": "master", "count": 1, "node_group_template_id": "{mapr-520mr2-default-master}" }, { "name": "worker", "count": 3, "node_group_template_id": "{mapr-520mr2-default-worker}" } ], "name": "mapr-520mr2-default-cluster", "neutron_management_network": "{neutron_management_network}", "cluster_configs": {}, "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/mapr/v5_2_0_mrv2/master.json0000664000175000017500000000124500000000000026571 0ustar00zuulzuul00000000000000{ "plugin_name": "mapr", "hadoop_version": "5.2.0.mrv2", "node_processes": [ "Metrics", "Webserver", "ZooKeeper", "HTTPFS", "Oozie", "FileServer", "CLDB", "Flume", "Hue", "NodeManager", "HistoryServer", "ResourceManager", "HiveServer2", "HiveMetastore", "Sqoop2-Client", "Sqoop2-Server" ], "name": "mapr-520mr2-default-master", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/mapr/v5_2_0_mrv2/worker.json0000664000175000017500000000057700000000000026616 0ustar00zuulzuul00000000000000{ "plugin_name": "mapr", "hadoop_version": "5.2.0.mrv2", "node_processes": [ "FileServer", "NodeManager" ], "name": "mapr-520mr2-default-worker", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.597891 sahara-16.0.0/sahara/plugins/default_templates/spark/0000775000175000017500000000000000000000000022622 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.721891 
sahara-16.0.0/sahara/plugins/default_templates/spark/v1_3_1/0000775000175000017500000000000000000000000023612 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/spark/v1_3_1/cluster.json0000664000175000017500000000102300000000000026162 0ustar00zuulzuul00000000000000{ "plugin_name": "spark", "hadoop_version": "1.3.1", "node_groups": [ { "name": "slave", "count": 3, "node_group_template_id": "{spark-131-default-slave}" }, { "name": "master", "count": 1, "node_group_template_id": "{spark-131-default-master}" } ], "name": "spark-131-default-cluster", "neutron_management_network": "{neutron_management_network}", "cluster_configs": {}, "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/spark/v1_3_1/master.json0000664000175000017500000000056200000000000026003 0ustar00zuulzuul00000000000000{ "plugin_name": "spark", "hadoop_version": "1.3.1", "node_processes": [ "namenode", "master" ], "name": "spark-131-default-master", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/spark/v1_3_1/slave.json0000664000175000017500000000056000000000000025620 0ustar00zuulzuul00000000000000{ "plugin_name": "spark", "hadoop_version": "1.3.1", "node_processes": [ "datanode", "slave" ], "name": "spark-131-default-slave", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.721891 sahara-16.0.0/sahara/plugins/default_templates/spark/v1_6_0/0000775000175000017500000000000000000000000023614 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/spark/v1_6_0/cluster.json0000664000175000017500000000102300000000000026164 0ustar00zuulzuul00000000000000{ "plugin_name": "spark", "hadoop_version": "1.6.0", "node_groups": [ { "name": "slave", "count": 3, "node_group_template_id": "{spark-160-default-slave}" }, { "name": "master", "count": 1, "node_group_template_id": "{spark-160-default-master}" } ], "name": "spark-160-default-cluster", "neutron_management_network": "{neutron_management_network}", "cluster_configs": {}, "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/spark/v1_6_0/master.json0000664000175000017500000000056200000000000026005 0ustar00zuulzuul00000000000000{ "plugin_name": "spark", "hadoop_version": "1.6.0", "node_processes": [ "namenode", "master" ], "name": "spark-160-default-master", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 
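Within each version directory the cluster template refers to its node group templates by name wrapped in braces, such as {spark-160-default-master} above; those references only become usable once they are mapped to the IDs of node group templates that actually exist. The sketch below shows one hedged way that mapping could be performed; the resolve_node_group_ids helper, the registry dict and its dummy UUIDs are purely illustrative and are not part of Sahara.

def resolve_node_group_ids(cluster_template, registry):
    # Swap each "{template-name}" reference for the ID recorded in registry;
    # unknown names are left as-is for later resolution.
    for group in cluster_template.get("node_groups", []):
        ref = group["node_group_template_id"]
        if ref.startswith("{") and ref.endswith("}"):
            group["node_group_template_id"] = registry.get(ref[1:-1], ref)
    return cluster_template


# Dummy IDs for illustration only.
registry = {
    "spark-160-default-master": "00000000-0000-0000-0000-000000000001",
    "spark-160-default-slave": "00000000-0000-0000-0000-000000000002",
}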
sahara-16.0.0/sahara/plugins/default_templates/spark/v1_6_0/slave.json0000664000175000017500000000056000000000000025622 0ustar00zuulzuul00000000000000{ "plugin_name": "spark", "hadoop_version": "1.6.0", "node_processes": [ "datanode", "slave" ], "name": "spark-160-default-slave", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.721891 sahara-16.0.0/sahara/plugins/default_templates/spark/v2_1_0/0000775000175000017500000000000000000000000023610 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/spark/v2_1_0/cluster.json0000664000175000017500000000102300000000000026160 0ustar00zuulzuul00000000000000{ "plugin_name": "spark", "hadoop_version": "2.1.0", "node_groups": [ { "name": "slave", "count": 3, "node_group_template_id": "{spark-210-default-slave}" }, { "name": "master", "count": 1, "node_group_template_id": "{spark-210-default-master}" } ], "name": "spark-210-default-cluster", "neutron_management_network": "{neutron_management_network}", "cluster_configs": {}, "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/spark/v2_1_0/master.json0000664000175000017500000000056200000000000026001 0ustar00zuulzuul00000000000000{ "plugin_name": "spark", "hadoop_version": "2.1.0", "node_processes": [ "namenode", "master" ], "name": "spark-210-default-master", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/spark/v2_1_0/slave.json0000664000175000017500000000056000000000000025616 0ustar00zuulzuul00000000000000{ "plugin_name": "spark", "hadoop_version": "2.1.0", "node_processes": [ "datanode", "slave" ], "name": "spark-210-default-slave", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.597891 sahara-16.0.0/sahara/plugins/default_templates/storm/0000775000175000017500000000000000000000000022646 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.721891 sahara-16.0.0/sahara/plugins/default_templates/storm/v1_0_1/0000775000175000017500000000000000000000000023633 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/storm/v1_0_1/cluster.json0000664000175000017500000000102700000000000026207 0ustar00zuulzuul00000000000000{ "plugin_name": "storm", "hadoop_version": "1.0.1", "node_groups": [ { "name": "slave", "count": 3, "node_group_template_id": "{storm-101-default-slave}" }, { "name": "master-zk", "count": 1, "node_group_template_id": "{storm-101-default-master}" } ], "name": 
"storm-101-default-cluster", "neutron_management_network": "{neutron_management_network}", "cluster_configs": {}, "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/storm/v1_0_1/master.json0000664000175000017500000000056400000000000026026 0ustar00zuulzuul00000000000000{ "plugin_name": "storm", "hadoop_version": "1.0.1", "node_processes": [ "nimbus", "zookeeper" ], "name": "storm-101-default-master", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/storm/v1_0_1/slave.json0000664000175000017500000000054300000000000025642 0ustar00zuulzuul00000000000000{ "plugin_name": "storm", "hadoop_version": "1.0.1", "node_processes": [ "supervisor" ], "name": "storm-101-default-slave", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.721891 sahara-16.0.0/sahara/plugins/default_templates/storm/v1_1_0/0000775000175000017500000000000000000000000023633 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/storm/v1_1_0/cluster.json0000664000175000017500000000102700000000000026207 0ustar00zuulzuul00000000000000{ "plugin_name": "storm", "hadoop_version": "1.1.0", "node_groups": [ { "name": "slave", "count": 3, "node_group_template_id": "{storm-110-default-slave}" }, { "name": "master-zk", "count": 1, "node_group_template_id": "{storm-110-default-master}" } ], "name": "storm-110-default-cluster", "neutron_management_network": "{neutron_management_network}", "cluster_configs": {}, "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/storm/v1_1_0/master.json0000664000175000017500000000056400000000000026026 0ustar00zuulzuul00000000000000{ "plugin_name": "storm", "hadoop_version": "1.1.0", "node_processes": [ "nimbus", "zookeeper" ], "name": "storm-110-default-master", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/storm/v1_1_0/slave.json0000664000175000017500000000054300000000000025642 0ustar00zuulzuul00000000000000{ "plugin_name": "storm", "hadoop_version": "1.1.0", "node_processes": [ "supervisor" ], "name": "storm-110-default-slave", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/template.conf0000664000175000017500000000326400000000000024171 
0ustar00zuulzuul00000000000000[DEFAULT] # Set the flavor_id to 2 which is m1.small in the # default flavor set flavor_id = 2 auto_security_group = true [cdh-550-default-manager] # For the CDH plugin, version 550, set the flavor_id # of the manager node to 4 which is m1.large in the # default flavor set flavor_id = 4 [cdh-550-default-master-core] # For the CDH plugin, version 550, set the flavor_id # of the master-core node to 3 which is m1.medium in the # default flavor set flavor_id = 3 [cdh-550-default-master-additional] # For the CDH plugin, version 550, set the flavor_id # of the master-additional node to 3 which is m1.medium in the # default flavor set flavor_id = 3 [cdh-570-default-manager] # For the CDH plugin, version 570, set the flavor_id # of the manager node to 4 which is m1.large in the # default flavor set flavor_id = 4 [cdh-570-default-master-core] # For the CDH plugin, version 570, set the flavor_id # of the master-core node to 3 which is m1.medium in the # default flavor set flavor_id = 3 [cdh-570-default-master-additional] # For the CDH plugin, version 570, set the flavor_id # of the master-additional node to 3 which is m1.medium in the # default flavor set flavor_id = 3 [cdh-590-default-manager] # For the CDH plugin, version 590, set the flavor_id # of the manager node to 4 which is m1.large in the # default flavor set flavor_id = 4 [cdh-590-default-master-core] # For the CDH plugin, version 590, set the flavor_id # of the master-core node to 3 which is m1.medium in the # default flavor set flavor_id = 3 [cdh-590-default-master-additional] # For the CDH plugin, version 590, set the flavor_id # of the master-additional node to 3 which is m1.medium in the # default flavor set flavor_id = 3 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.597891 sahara-16.0.0/sahara/plugins/default_templates/vanilla/0000775000175000017500000000000000000000000023130 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.721891 sahara-16.0.0/sahara/plugins/default_templates/vanilla/v2_7_1/0000775000175000017500000000000000000000000024125 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/vanilla/v2_7_1/cluster.json0000664000175000017500000000103500000000000026500 0ustar00zuulzuul00000000000000{ "plugin_name": "vanilla", "hadoop_version": "2.7.1", "node_groups": [ { "name": "worker", "count": 3, "node_group_template_id": "{vanilla-271-default-worker}" }, { "name": "master", "count": 1, "node_group_template_id": "{vanilla-271-default-master}" } ], "name": "vanilla-271-default-cluster", "neutron_management_network": "{neutron_management_network}", "cluster_configs": {}, "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/vanilla/v2_7_1/master.json0000664000175000017500000000074000000000000026314 0ustar00zuulzuul00000000000000{ "plugin_name": "vanilla", "hadoop_version": "2.7.1", "node_processes": [ "namenode", "resourcemanager", "historyserver", "oozie", "hive server", "spark history server" ], "name": "vanilla-271-default-master", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "is_protected": 
true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/default_templates/vanilla/v2_7_1/worker.json0000664000175000017500000000065400000000000026336 0ustar00zuulzuul00000000000000{ "plugin_name": "vanilla", "hadoop_version": "2.7.1", "node_processes": [ "datanode", "nodemanager" ], "name": "vanilla-271-default-worker", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", "security_groups": "{security_groups}", "volume_size": 2, "volumes_per_node": 2, "is_protected": true } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/edp.py0000664000175000017500000000577200000000000017135 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.service.edp import hdfs_helper from sahara.service.edp import job_utils from sahara.service.edp.oozie import engine as oozie_engine from sahara.service.edp.oozie.workflow_creator import workflow_factory from sahara.service.edp.spark import engine as spark_engine from sahara.service.edp.storm import engine as storm_engine from sahara.utils import edp JOB_TYPE_HIVE = edp.JOB_TYPE_HIVE JOB_TYPE_SPARK = edp.JOB_TYPE_SPARK JOB_TYPE_JAVA = edp.JOB_TYPE_JAVA JOB_TYPE_SHELL = edp.JOB_TYPE_SHELL JOB_TYPE_PIG = edp.JOB_TYPE_PIG JOB_TYPE_STORM = edp.JOB_TYPE_STORM JOB_TYPE_PYLEUS = edp.JOB_TYPE_PYLEUS JOB_TYPE_MAPREDUCE = edp.JOB_TYPE_MAPREDUCE JOB_TYPE_MAPREDUCE_STREAMING = edp.JOB_TYPE_MAPREDUCE_STREAMING JOB_TYPES_ALL = edp.JOB_TYPES_ALL JOB_STATUS_SUCCEEDED = edp.JOB_STATUS_SUCCEEDED class PluginsStormJobEngine(storm_engine.StormJobEngine): def __init__(self, cluster, **kwargs): super(PluginsStormJobEngine, self).__init__(cluster) class PluginsStormPyleusJobEngine(storm_engine.StormPyleusJobEngine): def __init__(self, cluster, **kwargs): super(PluginsStormPyleusJobEngine, self).__init__(cluster) class PluginsSparkJobEngine(spark_engine.SparkJobEngine): def __init__(self, cluster, **kwargs): super(PluginsSparkJobEngine, self).__init__(cluster) class PluginsSparkShellJobEngine(spark_engine.SparkShellJobEngine): def __init__(self, cluster, **kwargs): super(PluginsSparkShellJobEngine, self).__init__(cluster) class PluginsOozieJobEngine(oozie_engine.OozieJobEngine): def __init__(self, cluster, **kwargs): super(PluginsOozieJobEngine, self).__init__(cluster) def get_hive_shared_conf_path(hdfs_user, **kwargs): return edp.get_hive_shared_conf_path(hdfs_user) def compare_job_type(job_type, *args, **kwargs): return edp.compare_job_type(job_type, *args, **kwargs) def get_builtin_binaries(job, configs, **kwargs): return edp.get_builtin_binaries(job, configs) def create_dir_hadoop2(r, dir_name, hdfs_user, **kwargs): hdfs_helper.create_dir_hadoop2(r, dir_name, hdfs_user) def create_hbase_common_lib(r, **kwargs): hdfs_helper.create_hbase_common_lib(r) def get_plugin(cluster, **kwargs): return 
job_utils.get_plugin(cluster) def get_possible_job_config(job_type, **kwargs): return workflow_factory.get_possible_job_config(job_type) def get_possible_mapreduce_configs(**kwargs): return workflow_factory.get_possible_mapreduce_configs() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/exceptions.py0000664000175000017500000001655000000000000020542 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import sahara.exceptions as e from sahara.i18n import _ class NodeGroupCannotBeScaled(e.SaharaException): def __init__(self, ng_name, reason): self.message = _("Chosen node group %(ng_name)s cannot be scaled : " "%(reason)s") % {"ng_name": ng_name, "reason": reason} self.code = "NODE_GROUP_CANNOT_BE_SCALED" super(NodeGroupCannotBeScaled, self).__init__() class DecommissionError(e.SaharaException): code = "DECOMMISSION_ERROR" message = _("Failed to decommission cluster") def __init__(self, message=None): if message: self.message = message super(DecommissionError, self).__init__() class ClusterCannotBeScaled(e.SaharaException): def __init__(self, cluster_name, reason): self.message = _("Cluster %(cluster_name)s cannot be scaled : " "%(reason)s") % {"cluster_name": cluster_name, "reason": reason} self.code = "CLUSTER_CANNOT_BE_SCALED" super(ClusterCannotBeScaled, self).__init__() class RequiredServiceMissingException(e.SaharaException): """Exception indicating that a required service has not been deployed.""" def __init__(self, service_name, required_by=None): self.message = (_('Cluster is missing a service: %s') % service_name) if required_by: self.message = (_('%(message)s, required by service: ' '%(required_by)s') % {'message': self.message, 'required_by': required_by}) self.code = 'MISSING_SERVICE' super(RequiredServiceMissingException, self).__init__() class InvalidComponentCountException(e.SaharaException): """Exception indicating invalid number of components in a cluster. A message indicating a number of components cluster should contain and an invalid number of components are being deployed in a cluster. """ def __init__(self, component, expected_count, count, description=None): message = _("Hadoop cluster should contain %(expected_count)s " "%(component)s component(s)." " Actual %(component)s count is %(count)s") if description: message = ("%(message)s. %(description)s" % {'message': message, 'description': description}) self.message = message % {"expected_count": expected_count, "component": component, "count": count} self.code = "INVALID_COMPONENT_COUNT" super(InvalidComponentCountException, self).__init__() class InvalidClusterTopology(e.SaharaException): """Exception indicating another problems in a cluster topology, which is different from InvalidComponentCountException and RequiredServiceMissingException. 
""" code = "INVALID_TOPOLOGY" message = _("Cluster has invalid topology: {description}") def __init__(self, description): self.message = self.message.format(description=description) super(InvalidClusterTopology, self).__init__() class HadoopProvisionError(e.SaharaException): """Exception indicating that cluster provisioning failed. A message indicating the reason for failure must be provided. """ base_message = _("Failed to Provision Hadoop Cluster: %s") def __init__(self, message): self.code = "HADOOP_PROVISION_FAILED" self.message = self.base_message % message super(HadoopProvisionError, self).__init__() class NameNodeHAConfigurationError(e.SaharaException): """Exception indicating that hdp or cdh HDFS HA failed. A message indicating the reason for failure must be provided. """ base_message = _("NameNode High Availability: %s") def __init__(self, message): self.code = "NAMENODE_HIGHAVAILABILITY_CONFIGURATION_FAILED" self.message = self.base_message % message super(NameNodeHAConfigurationError, self).__init__() class ResourceManagerHAConfigurationError(e.SaharaException): """Exception indicating that cdh YARN HA failed. A message indicating the reason for failure must be provided. """ base_message = _("ResourceManager High Availability: %s") def __init__(self, message): self.code = "RESOURCEMANAGER_HIGHAVAILABILITY_CONFIGURATION_FAILED" self.message = self.base_message % message super(ResourceManagerHAConfigurationError, self).__init__() class ImageValidationSpecificationError(e.SaharaException): """Exception indicating that an image validation spec is in error.""" base_message = _("Image validation spec is in error: %s") def __init__(self, message, *args): self.code = "IMAGE_SPECIFICATION_ERROR" self.message = self.base_message % message super(ImageValidationSpecificationError, self).__init__() class ImageValidationError(e.SaharaException): """Exception indicating that an image has failed validation.""" base_message = _("Image has failed validation: %s") def __init__(self, message): self.code = "IMAGE_VALIDATION_FAILED" self.message = self.base_message % message super(ImageValidationError, self).__init__() class AllValidationsFailedError(ImageValidationError): """Exception indicating that all validations in an any block failed.""" sub_message = _("All validations have failed: %s") def __init__(self, exceptions): data = ";".join(ex.message for ex in exceptions) message = self.sub_message % data super(AllValidationsFailedError, self).__init__(message) class InvalidVolumeSizeException(e.SaharaException): """Exception indicating invalid configuration of components in a cluster. 
""" def __init__(self, volume_size, reserved): message = _("Volume size: %(volume_size)s GB should be greater than " "value of \"dfs_datanode_du_reserved\": %(reserved)s GB") self.message = message % {"volume_size": volume_size, "reserved": reserved} self.code = "INVALID_CONFIGURATION" super(InvalidVolumeSizeException, self).__init__() class PluginInvalidDataException(e.InvalidDataException): code = "INVALID_DATA" message = _("Data is invalid") class PluginConfigurationError(e.ConfigurationError): code = "CONFIGURATION_ERROR" message = _("The configuration has failed") class PluginNotFoundException(e.NotFoundException): code = "NOT_FOUND" message_template = _("Object '%s' is not found") class SaharaPluginException(e.SaharaException): code = "Sahara Plugin Exception" message = "" class PluginRemoteCommandException(e.RemoteCommandException): code = "REMOTE_COMMAND_FAILED" message_template = _("Error during command execution: \"%s\"") ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.721891 sahara-16.0.0/sahara/plugins/fake/0000775000175000017500000000000000000000000016706 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/fake/__init__.py0000664000175000017500000000000000000000000021005 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/fake/edp_engine.py0000664000175000017500000000346100000000000021361 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.service.edp import base_engine from sahara.service.validations.edp import job_execution as j from sahara.utils import edp class FakeJobEngine(base_engine.JobEngine): def cancel_job(self, job_execution): pass def get_job_status(self, job_execution): pass def run_job(self, job_execution): return 'engine_job_id', edp.JOB_STATUS_SUCCEEDED, None def run_scheduled_job(self, job_execution): pass def validate_job_execution(self, cluster, job, data): if job.type == edp.JOB_TYPE_SHELL: return # All other types except Java require input and output # objects and Java require main class if job.type in [edp.JOB_TYPE_JAVA, edp.JOB_TYPE_SPARK]: j.check_main_class_present(data, job) else: j.check_data_sources(data, job) job_type, subtype = edp.split_job_type(job.type) if job_type == edp.JOB_TYPE_MAPREDUCE and ( subtype == edp.JOB_SUBTYPE_STREAMING): j.check_streaming_present(data, job) @staticmethod def get_possible_job_config(job_type): return None @staticmethod def get_supported_job_types(): return edp.JOB_TYPES_ALL ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/fake/plugin.py0000664000175000017500000001125000000000000020555 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Mirantis Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara import context from sahara.i18n import _ from sahara.plugins import exceptions as pex from sahara.plugins.fake import edp_engine from sahara.plugins import kerberos as krb from sahara.plugins import provisioning as p from sahara.plugins import utils as plugin_utils class FakePluginProvider(p.ProvisioningPluginBase): def get_title(self): return "Fake Plugin" def get_description(self): return _("It's a fake plugin that aimed to work on the CirrOS images. " "It doesn't install Hadoop. It's needed to be able to test " "provisioning part of Sahara codebase itself.") def get_versions(self): return ["0.1"] def get_labels(self): return { 'plugin_labels': { 'enabled': {'status': True}, 'hidden': {'status': True}, }, 'version_labels': { '0.1': {'enabled': {'status': True}} } } def get_node_processes(self, hadoop_version): return { "HDFS": ["namenode", "datanode"], "MapReduce": ["tasktracker", "jobtracker"], "Kerberos": [], } def get_configs(self, hadoop_version): # returning kerberos configs return krb.get_config_list() def configure_cluster(self, cluster): with context.ThreadGroup() as tg: for instance in plugin_utils.get_instances(cluster): tg.spawn('fake-write-%s' % instance.id, self._write_ops, instance) def start_cluster(self, cluster): self.deploy_kerberos(cluster) with context.ThreadGroup() as tg: for instance in plugin_utils.get_instances(cluster): tg.spawn('fake-check-%s' % instance.id, self._check_ops, instance) def deploy_kerberos(self, cluster): all_instances = plugin_utils.get_instances(cluster) namenodes = plugin_utils.get_instances(cluster, 'namenode') server = None if len(namenodes) > 0: server = namenodes[0] elif len(all_instances) > 0: server = all_instances[0] if server: krb.deploy_infrastructure(cluster, server) def scale_cluster(self, cluster, instances): with context.ThreadGroup() as tg: for instance in instances: tg.spawn('fake-scaling-%s' % instance.id, self._all_check_ops, instance) def decommission_nodes(self, cluster, instances): pass def _write_ops(self, instance): with instance.remote() as r: # check typical SSH command r.execute_command('echo "Hello, world!"') # check write file data_1 = "sp@m" r.write_file_to('test_data', data_1, run_as_root=True) # check append file data_2 = " and eggs" r.append_to_file('test_data', data_2, run_as_root=True) # check replace string r.replace_remote_string('test_data', "eggs", "pony") def _check_ops(self, instance): expected_data = "sp@m and pony" with instance.remote() as r: actual_data = r.read_file_from('test_data', run_as_root=True) if actual_data.strip() != expected_data.strip(): raise pex.HadoopProvisionError("ACTUAL:\n%s\nEXPECTED:\n%s" % ( actual_data, expected_data)) def _all_check_ops(self, instance): self._write_ops(instance) self._check_ops(instance) def get_edp_engine(self, cluster, job_type): if job_type in edp_engine.FakeJobEngine.get_supported_job_types(): return edp_engine.FakeJobEngine() def get_edp_job_types(self, versions=None): res = {} for vers in self.get_versions(): 
if not versions or vers in versions: res[vers] = edp_engine.FakeJobEngine.get_supported_job_types() return res def get_edp_config_hints(self, job_type, version): if version in self.get_versions(): return edp_engine.FakeJobEngine.get_possible_job_config(job_type) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/health_check_base.py0000664000175000017500000002176600000000000021762 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import functools import threading from eventlet import timeout as e_timeout from oslo_config import cfg from oslo_log import log as logging import six from sahara import conductor from sahara import context from sahara import exceptions as ex from sahara.i18n import _ from sahara.plugins import base as plugin_base from sahara.service.health import common from sahara.utils import cluster as cluster_utils from sahara.utils.notification import sender cond = conductor.API CONF = cfg.CONF LOG = logging.getLogger(__name__) class BaseHealthError(ex.SaharaException): message_template = _("Cluster health is %(status)s. Reason: %(reason)s") code = 'HEALTH_ERROR' status = 'UNKNOWN' def __init__(self, reason): message = self.message_template % { 'status': self.status, 'reason': reason} # Ignoring Error id because it's not really needed super(BaseHealthError, self).__init__(message, inject_error_id=False) class RedHealthError(BaseHealthError): """Exception to indicate red state of the health check.""" code = "RED_STATE" status = common.HEALTH_STATUS_RED class YellowHealthError(BaseHealthError): """Exception to indicate yellow state of health check.""" code = "YELLOW_STATE" status = common.HEALTH_STATUS_YELLOW @six.add_metaclass(abc.ABCMeta) class BasicHealthCheck(object): def __init__(self, cluster): """Initialize a health check for the specified cluster.""" self.cluster = cluster self.health_check_id = None @abc.abstractmethod def get_health_check_name(self): """Return name of the health check.""" pass @abc.abstractmethod def is_available(self): """Validate availability of the health check for the specified cluster Return True when the health check is available for the specified cluster and False when it's not. """ pass @abc.abstractmethod def check_health(self): """Check health of the specified cluster Returns description if health check executed successfully. Raises YellowStateError to indicate Yellow result of the health check, and RedStateError to indicate Red result of the health check. 
""" pass def _indicate_start(self): vid = self.cluster.verification.id self.health_check_id = cond.cluster_health_check_add( context.ctx(), vid, {'status': common.HEALTH_STATUS_CHECKING, 'name': self.get_health_check_name()}).id self.health_check = cond.cluster_health_check_get( context.ctx(), self.health_check_id) sender.health_notify(self.cluster, self.health_check) def _write_result(self, status, description): cond.cluster_health_check_update( context.ctx(), self.health_check_id, {'status': status, 'description': description}) self.health_check = cond.cluster_health_check_get( context.ctx(), self.health_check_id) sender.health_notify(self.cluster, self.health_check) def execute(self): timeout = CONF.cluster_verifications.verification_timeout try: with e_timeout.Timeout(timeout, ex.TimeoutException(timeout)): if not self.is_available(): return self._indicate_start() try: result = self.check_health() status = common.HEALTH_STATUS_GREEN except Exception as exc: result = six.text_type(exc) if isinstance(exc, BaseHealthError): status = exc.status else: status = common.HEALTH_STATUS_RED except ex.TimeoutException: result = _("Health check timed out") status = common.HEALTH_STATUS_YELLOW self._write_result(status, result) class AllInstancesAccessible(BasicHealthCheck): def __init__(self, cluster, provider): self.provider = provider super(AllInstancesAccessible, self).__init__(cluster) def is_available(self): # always available : ) return True def get_health_check_name(self): return "Check of instances accessibility" def check_health(self): inst_ips_or_names = self.provider.get_accessibility_data() if inst_ips_or_names: insts = ', '.join(inst_ips_or_names) LOG.exception( "Instances (%s) are not available in the cluster", insts) raise RedHealthError( _("Instances (%s) are not available in the cluster.") % insts) return _("All instances are available") class ResolvConfIsUnchanged(BasicHealthCheck): def __init__(self, cluster, provider): self.provider = provider super(ResolvConfIsUnchanged, self).__init__(cluster) def is_available(self): return True def get_health_check_name(self): return "Check of '/etc/resolv.conf' files" def check_health(self): bad_inst, bad_res_conf = self.provider.get_resolv_conf_data() bad_inst_msg = '' res_conf_msg = '' if bad_inst: insts = ', '.join(bad_inst) bad_inst_msg = _("Couldn't read '/etc/resolv.conf' " "on instances: {}.").format(insts) if bad_res_conf: insts = ', '.join(bad_res_conf) ns = ', '.join(CONF.nameservers) res_conf_msg = _( "Instances ({}) have incorrect '/etc/resolv.conf' " "file, expected nameservers: {}.").format(insts, ns) if bad_inst_msg or res_conf_msg: LOG.exception("{} {}".format(res_conf_msg, bad_inst_msg)) raise RedHealthError(_("{} {}").format(res_conf_msg, bad_inst_msg)) return _("All instances have correct '/etc/resolv.conf' file") class AlertsProvider(object): def __init__(self, cluster): self._data = None self._cluster = cluster self._instances = None self.get_alerts_data() def _instance_get_data(self, instance, lock): try: with instance.remote() as r: data = self._get_resolv_conf(r) except Exception: data = None LOG.exception("Couldn't read '/etc/resolv.conf'") with lock: self._data[instance.get_ip_or_dns_name()] = data def get_accessibility_data(self): bad_instances = [] for el in self._data: if self._data[el] is None: bad_instances.append(el) return bad_instances def get_resolv_conf_data(self): bad_instances = [] bad_resolv_conf = [] for inst_ip_or_name, data in self._data.iteritems(): if data is None: 
bad_instances.append(inst_ip_or_name) continue for nameserver in CONF.nameservers: if nameserver not in data: bad_resolv_conf.append(inst_ip_or_name) break return bad_instances, bad_resolv_conf @staticmethod def _get_resolv_conf(inst_r): # returns None if error occurred while reading resolv.conf # otherwise returns content of this file code, resolv_conf = inst_r.execute_command( "cat /etc/resolv.conf", raise_when_error=False) if code != 0: return None return resolv_conf def get_alerts_data(self, check_type=None): if check_type and self._data is not None: # return cached data return self._data.get(check_type, []) self._data = {} self._instances = cluster_utils.get_instances(self._cluster) lock = threading.Lock() with context.ThreadGroup() as tg: for ins in self._instances: tg.spawn('Get health check data of instance %s' % ins.id, self._instance_get_data, ins, lock) return self._data def get_basic(cluster): provider = AlertsProvider(cluster) basic = [functools.partial(AllInstancesAccessible, provider=provider)] if cluster.use_designate_feature(): basic.append(functools.partial( ResolvConfIsUnchanged, provider=provider)) return basic def get_health_checks(cluster): plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name) plugin_specific = plugin.get_health_checks(cluster) plugin_specific.extend(get_basic(cluster)) return plugin_specific ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/images.py0000664000175000017500000012766300000000000017636 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_utils import uuidutils import abc import collections import copy import functools import itertools from os import path import jsonschema import six import yaml from sahara import exceptions as ex from sahara.i18n import _ from sahara.plugins import exceptions as p_ex from sahara.plugins import utils def transform_exception(from_type, to_type, transform_func=None): """Decorator to transform exception types. :param from_type: The type of exception to catch and transform. :param to_type: The type of exception to raise instead. :param transform_func: A function to transform from_type into to_type, which must be of the form func(exc, to_type). Defaults to: lambda exc, new_type: new_type(exc.message) """ if not transform_func: transform_func = lambda exc, new_type: new_type(exc.message) def decorator(func): @functools.wraps(func) def handler(*args, **kwargs): try: func(*args, **kwargs) except from_type as exc: raise transform_func(exc, to_type) return handler return decorator def validate_instance(instance, validators, test_only=False, **kwargs): """Runs all validators against the specified instance. :param instance: An instance to validate. :param validators: A sequence of ImageValidators. :param test_only: If true, all validators will only verify that a desired state is present, and fail if it is not. 
If false, all validators will attempt to enforce the desired state if possible, and succeed if this enforcement succeeds. :raises ImageValidationError: If validation fails. """ with instance.remote() as remote: for validator in validators: validator.validate(remote, test_only=test_only, **kwargs) class ImageArgument(object): """An argument used by an image manifest.""" SPEC_SCHEMA = { "type": "object", "items": { "type": "object", "properties": { "target_variable": { "type": "string", "minLength": 1 }, "description": { "type": "string", "minLength": 1 }, "default": { "type": "string", "minLength": 1 }, "required": { "type": "boolean", "minLength": 1 }, "choices": { "type": "array", "minLength": 1, "items": { "type": "string" } } } } } @classmethod def from_spec(cls, spec): """Constructs and returns a set of arguments from a specification. :param spec: The specification for the argument set. :return: A dict of arguments built to the specification. """ jsonschema.validate(spec, cls.SPEC_SCHEMA) arguments = {name: cls(name, arg.get('description'), arg.get('default'), arg.get('required'), arg.get('choices')) for name, arg in six.iteritems(spec)} reserved_names = ['distro', 'test_only'] for name, arg in six.iteritems(arguments): if name in reserved_names: raise p_ex.ImageValidationSpecificationError( _("The following argument names are reserved: " "{names}").format(reserved_names)) if not arg.default and not arg.required: raise p_ex.ImageValidationSpecificationError( _("Argument {name} is not required and must specify a " "default value.").format(name=arg.name)) if arg.choices and arg.default and arg.default not in arg.choices: raise p_ex.ImageValidationSpecificationError( _("Argument {name} specifies a default which is not one " "of its choices.").format(name=arg.name)) return arguments def __init__(self, name, description=None, default=None, required=False, choices=None): self.name = name self.description = description self.default = default self.required = required self.choices = choices @six.add_metaclass(abc.ABCMeta) class ImageValidator(object): """Validates the image spawned to an instance via a set of rules.""" @abc.abstractmethod def validate(self, remote, test_only=False, **kwargs): """Validates the image. :param remote: A remote socket to the instance. :param test_only: If true, all validators will only verify that a desired state is present, and fail if it is not. If false, all validators will attempt to enforce the desired state if possible, and succeed if this enforcement succeeds. :raises ImageValidationError: If validation fails. """ pass @six.add_metaclass(abc.ABCMeta) class SaharaImageValidatorBase(ImageValidator): """Base class for Sahara's native image validation.""" DISTRO_KEY = 'distro' TEST_ONLY_KEY = 'test_only' ORDERED_VALIDATORS_SCHEMA = { "type": "array", "items": { "type": "object", "minProperties": 1, "maxProperties": 1 } } _DISTRO_FAMILES = { 'centos': 'redhat', 'centos7': 'redhat', 'fedora': 'redhat', 'redhat': 'redhat', 'rhel': 'redhat', 'redhatenterpriseserver': 'redhat', 'ubuntu': 'debian' } @staticmethod def get_validator_map(custom_validator_map=None): """Gets the map of validator name token to validator class. :param custom_validator_map: A map of validator names and classes to add to the ones Sahara provides by default. These will take precedence over the base validators in case of key overlap. :return: A map of validator names and classes. 
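        For instance (``MyValidator`` is a hypothetical validator class used
        purely for illustration)::

            custom_map = {'my_check': MyValidator}  # assumed custom validator
            vmap = SaharaImageValidatorBase.get_validator_map(custom_map)
            # vmap now holds the built-in validators plus 'my_check'; a custom
            # entry reusing a built-in key such as 'package' would override it.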
""" default_validator_map = { 'package': SaharaPackageValidator, 'script': SaharaScriptValidator, 'copy_script': SaharaCopyScriptValidator, 'any': SaharaAnyValidator, 'all': SaharaAllValidator, 'os_case': SaharaOSCaseValidator, 'argument_case': SaharaArgumentCaseValidator, 'argument_set': SaharaArgumentSetterValidator, } if custom_validator_map: default_validator_map.update(custom_validator_map) return default_validator_map @classmethod def from_yaml(cls, yaml_path, validator_map=None, resource_roots=None, package='sahara'): """Constructs and returns a validator from the provided yaml file. :param yaml_path: The relative path to a yaml file. :param validator_map: A map of validator name to class. :param resource_roots: The roots from which relative paths to resources (scripts and such) will be referenced. Any resource will be pulled from the first path in the list at which a file exists. :return: A SaharaImageValidator built to the yaml specification. """ validator_map = validator_map or {} resource_roots = resource_roots or [] file_text = utils.get_file_text(yaml_path, package) spec = yaml.safe_load(file_text) validator_map = cls.get_validator_map(validator_map) return cls.from_spec(spec, validator_map, resource_roots, package) @classmethod def from_spec(cls, spec, validator_map, resource_roots, package='sahara'): """Constructs and returns a validator from a specification object. :param spec: The specification for the validator. :param validator_map: A map of validator name to class. :param resource_roots: The roots from which relative paths to resources (scripts and such) will be referenced. Any resource will be pulled from the first path in the list at which a file exists. :return: A validator built to the specification. """ pass @classmethod def from_spec_list(cls, specs, validator_map, resource_roots, package='sahara'): """Constructs a list of validators from a list of specifications. :param specs: A list of validator specifications, each of which will be a dict of size 1, where the key represents the validator type and the value respresents its specification. :param validator_map: A map of validator name to class. :param resource_roots: The roots from which relative paths to resources (scripts and such) will be referenced. Any resource will be pulled from the first path in the list at which a file exists. :return: A list of validators. """ validators = [] for spec in specs: validator_class, validator_spec = cls.get_class_from_spec( spec, validator_map) validators.append(validator_class.from_spec( validator_spec, validator_map, resource_roots, package)) return validators @classmethod def get_class_from_spec(cls, spec, validator_map): """Gets the class and specification from a validator dict. :param spec: A validator specification including its type: a dict of size 1, where the key represents the validator type and the value respresents its configuration. :param validator_map: A map of validator name to class. :return: A tuple of validator class and configuration. """ key, value = list(six.iteritems(spec))[0] validator_class = validator_map.get(key, None) if not validator_class: raise p_ex.ImageValidationSpecificationError( _("Validator type %s not found.") % validator_class) return validator_class, value class ValidationAttemptFailed(object): """An object representing a failed validation attempt. Primarily for use by the SaharaAnyValidator, which must aggregate failures for error exposition purposes. 
""" def __init__(self, exception): self.exception = exception def __bool__(self): return False def __nonzero__(self): return False def try_validate(self, remote, test_only=False, image_arguments=None, **kwargs): """Attempts to validate, but returns rather than raising on failure. :param remote: A remote socket to the instance. :param test_only: If true, all validators will only verify that a desired state is present, and fail if it is not. If false, all validators will attempt to enforce the desired state if possible, and succeed if this enforcement succeeds. :param image_arguments: A dictionary of image argument values keyed by argument name. :return: True if successful, ValidationAttemptFailed object if failed. """ try: self.validate( remote, test_only=test_only, image_arguments=image_arguments, **kwargs) return True except p_ex.ImageValidationError as exc: return self.ValidationAttemptFailed(exc) class SaharaImageValidator(SaharaImageValidatorBase): """The root of any tree of SaharaImageValidators. This validator serves as the root of the tree for SaharaImageValidators, and provides any needed initialization (such as distro retrieval.) """ SPEC_SCHEMA = { "title": "SaharaImageValidator", "type": "object", "properties": { "validators": SaharaImageValidatorBase.ORDERED_VALIDATORS_SCHEMA }, "required": ["validators"] } def get_argument_list(self): return [argument for name, argument in six.iteritems(self.arguments)] @classmethod def from_spec(cls, spec, validator_map, resource_roots, package='sahara'): """Constructs and returns a validator from a specification object. :param spec: The specification for the validator: a dict containing the key "validators", which contains a list of validator specifications. :param validator_map: A map of validator name to class. :param resource_roots: The roots from which relative paths to resources (scripts and such) will be referenced. Any resource will be pulled from the first path in the list at which a file exists. :return: A SaharaImageValidator containing all specified validators. """ jsonschema.validate(spec, cls.SPEC_SCHEMA) arguments_spec = spec.get('arguments', {}) arguments = ImageArgument.from_spec(arguments_spec) validators_spec = spec['validators'] validator = SaharaAllValidator.from_spec( validators_spec, validator_map, resource_roots, package) return cls(validator, arguments) def __init__(self, validator, arguments): """Constructor method. :param validator: A SaharaAllValidator containing the specified validators. """ self.validator = validator self.validators = validator.validators self.arguments = arguments @transform_exception(ex.RemoteCommandException, p_ex.ImageValidationError) def validate(self, remote, test_only=False, image_arguments=None, **kwargs): """Attempts to validate the image. Before deferring to contained validators, performs one-time setup steps such as distro discovery. :param remote: A remote socket to the instance. :param test_only: If true, all validators will only verify that a desired state is present, and fail if it is not. If false, all validators will attempt to enforce the desired state if possible, and succeed if this enforcement succeeds. :param image_arguments: A dictionary of image argument values keyed by argument name. :raises ImageValidationError: If validation fails. 
""" argument_values = {} for name, argument in six.iteritems(self.arguments): if name not in image_arguments: if argument.required: raise p_ex.ImageValidationError( _("Argument {name} is required for image " "processing.").format(name=name)) else: argument_values[name] = argument.default else: value = image_arguments[name] choices = argument.choices if choices and value not in choices: raise p_ex.ImageValidationError( _("Value for argument {name} must be one of " "{choices}.").format(name=name, choices=choices)) else: argument_values[name] = value argument_values[self.DISTRO_KEY] = remote.get_os_distrib() self.validator.validate(remote, test_only=test_only, image_arguments=argument_values) class SaharaPackageValidator(SaharaImageValidatorBase): """A validator that checks package installation state on the instance.""" class Package(object): def __init__(self, name, version=None): self.name = name self.version = version def __str__(self): return ("%s-%s" % (self.name, self.version) if self.version else self.name) _SINGLE_PACKAGE_SCHEMA = { "oneOf": [ { "type": "object", "minProperties": 1, "maxProperties": 1, "additionalProperties": { "type": "object", "properties": { "version": { "type": "string", "minLength": 1 }, } }, }, { "type": "string", "minLength": 1 } ] } SPEC_SCHEMA = { "title": "SaharaPackageValidator", "oneOf": [ _SINGLE_PACKAGE_SCHEMA, { "type": "array", "items": _SINGLE_PACKAGE_SCHEMA, "minLength": 1 } ] } @classmethod def _package_from_spec(cls, spec): """Builds a single package object from a specification. :param spec: May be a string or single-length dictionary of name to configuration values. :return: A package object. """ if isinstance(spec, six.string_types): return cls.Package(spec, None) else: package, properties = list(six.iteritems(spec))[0] version = properties.get('version', None) return cls.Package(package, version) @classmethod def from_spec(cls, spec, validator_map, resource_roots, package='sahara'): """Builds a package validator from a specification. :param spec: May be a string, a single-length dictionary of name to configuration values, or a list containing any number of either or both of the above. Configuration values may include: version: The version of the package to check and/or install. :param validator_map: A map of validator name to class. :param resource_roots: The roots from which relative paths to resources (scripts and such) will be referenced. Any resource will be pulled from the first path in the list at which a file exists. :return: A validator that will check that the specified package or packages are installed. """ jsonschema.validate(spec, cls.SPEC_SCHEMA) packages = ([cls._package_from_spec(package_spec) for package_spec in spec] if isinstance(spec, list) else [cls._package_from_spec(spec)]) return cls(packages) def __init__(self, packages): self.packages = packages @transform_exception(ex.RemoteCommandException, p_ex.ImageValidationError) def validate(self, remote, test_only=False, image_arguments=None, **kwargs): """Attempts to validate package installation on the image. Even if test_only=False, attempts to verify previous package installation offline before using networked tools to validate or install new packages. :param remote: A remote socket to the instance. :param test_only: If true, all validators will only verify that a desired state is present, and fail if it is not. If false, all validators will attempt to enforce the desired state if possible, and succeed if this enforcement succeeds. 
:param image_arguments: A dictionary of image argument values keyed by argument name. :raises ImageValidationError: If validation fails. """ env_distro = image_arguments[self.DISTRO_KEY] env_family = self._DISTRO_FAMILES[env_distro] check, install = self._DISTRO_TOOLS[env_family] if not env_family: raise p_ex.ImageValidationError( _("Unknown distro: cannot verify or install packages.")) try: check(self, remote) except (ex.SubprocessException, ex.RemoteCommandException, RuntimeError): if not test_only: install(self, remote) check(self, remote) else: raise def _dpkg_check(self, remote): check_cmd = ("dpkg -s %s" % " ".join(str(package) for package in self.packages)) return _sudo(remote, check_cmd) def _rpm_check(self, remote): check_cmd = ("rpm -q %s" % " ".join(str(package) for package in self.packages)) return _sudo(remote, check_cmd) def _yum_install(self, remote): install_cmd = ( "yum install -y %s" % " ".join(str(package) for package in self.packages)) _sudo(remote, install_cmd) def _apt_install(self, remote): install_cmd = ( "DEBIAN_FRONTEND=noninteractive apt-get -y install %s" % " ".join(str(package) for package in self.packages)) return _sudo(remote, install_cmd) _DISTRO_TOOLS = { "redhat": (_rpm_check, _yum_install), "debian": (_dpkg_check, _apt_install) } class SaharaScriptValidator(SaharaImageValidatorBase): """A validator that runs a script on the instance.""" _DEFAULT_ENV_VARS = [SaharaImageValidatorBase.TEST_ONLY_KEY, SaharaImageValidatorBase.DISTRO_KEY] SPEC_SCHEMA = { "title": "SaharaScriptValidator", "oneOf": [ { "type": "object", "minProperties": 1, "maxProperties": 1, "additionalProperties": { "type": "object", "properties": { "env_vars": { "type": "array", "items": { "type": "string" } }, "output": { "type": "string", "minLength": 1 }, "inline": { "type": "string", "minLength": 1 } }, } }, { "type": "string" } ] } @classmethod def from_spec(cls, spec, validator_map, resource_roots, package='sahara'): """Builds a script validator from a specification. :param spec: May be a string or a single-length dictionary of name to configuration values. Configuration values include: env_vars: A list of environment variable names to send to the script. output: A key into which to put the stdout of the script in the image_arguments of the validation run. :param validator_map: A map of validator name to class. :param resource_roots: The roots from which relative paths to resources (scripts and such) will be referenced. Any resource will be pulled from the first path in the list at which a file exists. :return: A validator that will run a script on the image. """ jsonschema.validate(spec, cls.SPEC_SCHEMA) script_contents = None if isinstance(spec, six.string_types): script_path = spec env_vars, output_var = cls._DEFAULT_ENV_VARS, None else: script_path, properties = list(six.iteritems(spec))[0] env_vars = cls._DEFAULT_ENV_VARS + properties.get('env_vars', []) output_var = properties.get('output', None) script_contents = properties.get('inline') if not script_contents: for root in resource_roots: file_path = path.join(root, script_path) script_contents = utils.try_get_file_text(file_path, package) if script_contents: break if not script_contents: raise p_ex.ImageValidationSpecificationError( _("Script %s not found in any resource roots.") % script_path) return SaharaScriptValidator(script_contents, env_vars, output_var) def __init__(self, script_contents, env_vars=None, output_var=None): """Constructor method. :param script_contents: A string representation of the script. 
:param env_vars: A list of environment variables to send to the script. :param output_var: A key into which to put the stdout of the script in the image_arguments of the validation run. :return: A SaharaScriptValidator. """ self.script_contents = script_contents self.env_vars = env_vars or [] self.output_var = output_var @transform_exception(ex.RemoteCommandException, p_ex.ImageValidationError) def validate(self, remote, test_only=False, image_arguments=None, **kwargs): """Attempts to validate by running a script on the image. :param remote: A remote socket to the instance. :param test_only: If true, all validators will only verify that a desired state is present, and fail if it is not. If false, all validators will attempt to enforce the desired state if possible, and succeed if this enforcement succeeds. :param image_arguments: A dictionary of image argument values keyed by argument name. Note that the key SIV_TEST_ONLY will be set to 1 if the script should test_only and 0 otherwise; all scripts should act on this input if possible. The key SIV_DISTRO will also contain the distro representation, per `lsb_release -is`. :raises ImageValidationError: If validation fails. """ arguments = copy.deepcopy(image_arguments) arguments[self.TEST_ONLY_KEY] = 1 if test_only else 0 script = "\n".join(["%(env_vars)s", "%(script)s"]) env_vars = "\n".join("export %s=%s" % (key, value) for (key, value) in six.iteritems(arguments) if key in self.env_vars) script = script % {"env_vars": env_vars, "script": self.script_contents.decode('utf-8')} path = '/tmp/%s.sh' % uuidutils.generate_uuid() remote.write_file_to(path, script, run_as_root=True) _sudo(remote, 'chmod +x %s' % path) code, stdout = _sudo(remote, path) if self.output_var: image_arguments[self.output_var] = stdout class SaharaCopyScriptValidator(SaharaImageValidatorBase): """A validator that copy a script to the instance.""" SPEC_SCHEMA = { "title": "SaharaCopyScriptValidator", "oneOf": [ { "type": "object", "minProperties": 1, "maxProperties": 1, "additionalProperties": { "type": "object", "properties": { "output": { "type": "string", "minLength": 1 }, "inline": { "type": "string", "minLength": 1 } }, } }, { "type": "string" } ] } @classmethod def from_spec(cls, spec, validator_map, resource_roots, package='sahara'): """Builds a copy script validator from a specification. :param spec: May be a string or a single-length dictionary of name to configuration values. Configuration values include: env_vars: A list of environment variable names to send to the script. output: A key into which to put the stdout of the script in the image_arguments of the validation run. :param validator_map: A map of validator name to class. :param resource_roots: The roots from which relative paths to resources (scripts and such) will be referenced. Any resource will be pulled from the first path in the list at which a file exists. :return: A validator that will copy a script to the image. 
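        For example (the script path and output key are illustrative; the
        script itself must exist under one of the resource roots)::

            spec = {'plugins/resources/my_setup.sh': {'output': 'setup_out'}}
            validator = SaharaCopyScriptValidator.from_spec(
                spec, {}, resource_roots=['.'])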
""" jsonschema.validate(spec, cls.SPEC_SCHEMA) script_contents = None if isinstance(spec, six.string_types): script_path = spec output_var = None else: script_path, properties = list(six.iteritems(spec))[0] output_var = properties.get('output', None) script_contents = properties.get('inline') if not script_contents: for root in resource_roots: file_path = path.join(root, script_path) script_contents = utils.try_get_file_text(file_path, package) if script_contents: break script_name = script_path.split('/')[2] if not script_contents: raise p_ex.ImageValidationSpecificationError( _("Script %s not found in any resource roots.") % script_path) return SaharaCopyScriptValidator(script_contents, script_name, output_var) def __init__(self, script_contents, script_name, output_var=None): """Constructor method. :param script_contents: A string representation of the script. :param output_var: A key into which to put the stdout of the script in the image_arguments of the validation run. :return: A SaharaScriptValidator. """ self.script_contents = script_contents self.script_name = script_name self.output_var = output_var @transform_exception(ex.RemoteCommandException, p_ex.ImageValidationError) def validate(self, remote, test_only=False, image_arguments=None, **kwargs): """Attempts to validate by running a script on the image. :param remote: A remote socket to the instance. :param test_only: If true, all validators will only verify that a desired state is present, and fail if it is not. If false, all validators will attempt to enforce the desired state if possible, and succeed if this enforcement succeeds. :param image_arguments: A dictionary of image argument values keyed by argument name. Note that the key SIV_TEST_ONLY will be set to 1 if the script should test_only and 0 otherwise; all scripts should act on this input if possible. The key SIV_DISTRO will also contain the distro representation, per `lsb_release -is`. :raises ImageValidationError: If validation fails. """ arguments = copy.deepcopy(image_arguments) arguments[self.TEST_ONLY_KEY] = 1 if test_only else 0 script = "\n".join(["%(script)s"]) script = script % {"script": self.script_contents} path = '/tmp/%s' % self.script_name remote.write_file_to(path, script, run_as_root=True) @six.add_metaclass(abc.ABCMeta) class SaharaAggregateValidator(SaharaImageValidatorBase): """An abstract class representing an ordered list of other validators.""" SPEC_SCHEMA = SaharaImageValidator.ORDERED_VALIDATORS_SCHEMA @classmethod def from_spec(cls, spec, validator_map, resource_roots, package='sahara'): """Builds the aggregate validator from a specification. :param spec: A list of validator definitions, each of which is a single-length dictionary of name to configuration values. :param validator_map: A map of validator name to class. :param resource_roots: The roots from which relative paths to resources (scripts and such) will be referenced. Any resource will be pulled from the first path in the list at which a file exists. :return: An aggregate validator. 
""" jsonschema.validate(spec, cls.SPEC_SCHEMA) validators = cls.from_spec_list(spec, validator_map, resource_roots, package) return cls(validators) def __init__(self, validators): self.validators = validators class SaharaAnyValidator(SaharaAggregateValidator): """A list of validators, only one of which must succeed.""" def _try_all(self, remote, test_only=False, image_arguments=None, **kwargs): results = [] for validator in self.validators: result = validator.try_validate(remote, test_only=test_only, image_arguments=image_arguments, **kwargs) results.append(result) if result: break return results def validate(self, remote, test_only=False, image_arguments=None, **kwargs): """Attempts to validate any of the contained validators. Note that if test_only=False, this validator will first run all contained validators using test_only=True, and succeed immediately should any pass validation. If all fail, it will only then run them using test_only=False, and again succeed immediately should any pass. :param remote: A remote socket to the instance. :param test_only: If true, all validators will only verify that a desired state is present, and fail if it is not. If false, all validators will attempt to enforce the desired state if possible, and succeed if this enforcement succeeds. :param image_arguments: A dictionary of image argument values keyed by argument name. :raises ImageValidationError: If validation fails. """ results = self._try_all(remote, test_only=True, image_arguments=image_arguments) if not test_only and not any(results): results = self._try_all(remote, test_only=False, image_arguments=image_arguments) if not any(results): raise p_ex.AllValidationsFailedError(result.exception for result in results) class SaharaAllValidator(SaharaAggregateValidator): """A list of validators, all of which must succeed.""" def validate(self, remote, test_only=False, image_arguments=None, **kwargs): """Attempts to validate all of the contained validators. :param remote: A remote socket to the instance. :param test_only: If true, all validators will only verify that a desired state is present, and fail if it is not. If false, all validators will attempt to enforce the desired state if possible, and succeed if this enforcement succeeds. :param image_arguments: A dictionary of image argument values keyed by argument name. :raises ImageValidationError: If validation fails. """ for validator in self.validators: validator.validate(remote, test_only=test_only, image_arguments=image_arguments) class SaharaOSCaseValidator(SaharaImageValidatorBase): """A validator which will take different actions depending on distro.""" _distro_tuple = collections.namedtuple('Distro', ['distro', 'validator']) SPEC_SCHEMA = { "type": "array", "minLength": 1, "items": { "type": "object", "minProperties": 1, "maxProperties": 1, "additionalProperties": SaharaImageValidator.ORDERED_VALIDATORS_SCHEMA, } } @classmethod def from_spec(cls, spec, validator_map, resource_roots, package='sahara'): """Builds an os_case validator from a specification. :param spec: A list of single-length dictionaries. The key of each is a distro or family name and the value under each key is a list of validators (all of which must succeed.) :param validator_map: A map of validator name to class. :param resource_roots: The roots from which relative paths to resources (scripts and such) will be referenced. Any resource will be pulled from the first path in the list at which a file exists. :return: A SaharaOSCaseValidator. 
""" jsonschema.validate(spec, cls.SPEC_SCHEMA) distros = itertools.chain(*(six.iteritems(distro_spec) for distro_spec in spec)) distros = [ cls._distro_tuple(key, SaharaAllValidator.from_spec( value, validator_map, resource_roots, package)) for (key, value) in distros] return cls(distros) def __init__(self, distros): """Constructor method. :param distros: A list of distro tuples (distro, list of validators). """ self.distros = distros def validate(self, remote, test_only=False, image_arguments=None, **kwargs): """Attempts to validate depending on distro. May match the OS by specific distro or by family (centos may match "centos" or "redhat", for instance.) If multiple keys match the distro, only the validators under the first matched key will be run. If no keys match, no validators are run, and validation proceeds. :param remote: A remote socket to the instance. :param test_only: If true, all validators will only verify that a desired state is present, and fail if it is not. If false, all validators will attempt to enforce the desired state if possible, and succeed if this enforcement succeeds. :param image_arguments: A dictionary of image argument values keyed by argument name. :raises ImageValidationError: If validation fails. """ env_distro = image_arguments[self.DISTRO_KEY] family = self._DISTRO_FAMILES.get(env_distro) matches = {env_distro, family} if family else {env_distro} for distro, validator in self.distros: if distro in matches: validator.validate( remote, test_only=test_only, image_arguments=image_arguments) break class SaharaArgumentCaseValidator(SaharaImageValidatorBase): """A validator which will take different actions depending on distro.""" SPEC_SCHEMA = { "type": "object", "properties": { "argument_name": { "type": "string", "minLength": 1 }, "cases": { "type": "object", "minProperties": 1, "additionalProperties": SaharaImageValidator.ORDERED_VALIDATORS_SCHEMA, }, }, "additionalProperties": False, "required": ["argument_name", "cases"] } @classmethod def from_spec(cls, spec, validator_map, resource_roots, package='sahara'): """Builds an argument_case validator from a specification. :param spec: A dictionary with two items: "argument_name", containing a string indicating the argument to be checked, and "cases", a dictionary. The key of each item in the dictionary is a value which may or may not match the argument value, and the value is a list of validators to be run in case it does. :param validator_map: A map of validator name to class. :param resource_roots: The roots from which relative paths to resources (scripts and such) will be referenced. Any resource will be pulled from the first path in the list at which a file exists. :return: A SaharaArgumentCaseValidator. """ jsonschema.validate(spec, cls.SPEC_SCHEMA) argument_name = spec['argument_name'] cases = {key: SaharaAllValidator.from_spec( value, validator_map, resource_roots, package) for key, value in six.iteritems(spec['cases'])} return cls(argument_name, cases) def __init__(self, argument_name, cases): """Constructor method. :param argument_name: The name of an argument. :param cases: A dictionary of possible argument value to a sub-validator to run in case of a match. """ self.argument_name = argument_name self.cases = cases def validate(self, remote, test_only=False, image_arguments=None, **kwargs): """Attempts to validate depending on argument value. :param remote: A remote socket to the instance. 
:param test_only: If true, all validators will only verify that a desired state is present, and fail if it is not. If false, all validators will attempt to enforce the desired state if possible, and succeed if this enforcement succeeds. :param image_arguments: A dictionary of image argument values keyed by argument name. :raises ImageValidationError: If validation fails. """ arg = self.argument_name if arg not in image_arguments: raise p_ex.ImageValidationError( _("Argument {name} not found.").format(name=arg)) value = image_arguments[arg] if value in self.cases: self.cases[value].validate( remote, test_only=test_only, image_arguments=image_arguments) class SaharaArgumentSetterValidator(SaharaImageValidatorBase): """A validator which sets a specific argument to a specific value.""" SPEC_SCHEMA = { "type": "object", "properties": { "argument_name": { "type": "string", "minLength": 1 }, "value": { "type": "string", "minLength": 1 }, }, "additionalProperties": False, "required": ["argument_name", "value"] } @classmethod def from_spec(cls, spec, validator_map, resource_roots, package='sahara'): """Builds an argument_set validator from a specification. :param spec: A dictionary with two items: "argument_name", containing a string indicating the argument to be set, and "value", a value to which to set that argument. :param validator_map: A map of validator name to class. :param resource_roots: The roots from which relative paths to resources (scripts and such) will be referenced. Any resource will be pulled from the first path in the list at which a file exists. :return: A SaharaArgumentSetterValidator. """ jsonschema.validate(spec, cls.SPEC_SCHEMA) argument_name = spec['argument_name'] value = spec['value'] return cls(argument_name, value) def __init__(self, argument_name, value): """Constructor method. :param argument_name: The name of an argument. :param value: A value to which to set that argument. """ self.argument_name = argument_name self.value = value def validate(self, remote, test_only=False, image_arguments=None, **kwargs): """Attempts to validate depending on argument value. :param remote: A remote socket to the instance. :param test_only: If true, all validators will only verify that a desired state is present, and fail if it is not. If false, all validators will attempt to enforce the desired state if possible, and succeed if this enforcement succeeds. :param image_arguments: A dictionary of image argument values keyed by argument name. """ image_arguments[self.argument_name] = self.value def _sudo(remote, cmd, **kwargs): return remote.execute_command(cmd, run_as_root=True, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/kerberos.py0000664000175000017500000003222500000000000020172 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
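# NOTE: rough usage sketch (not executed here; 'kdc_server' is an assumed
# instance object chosen by the caller): a provisioning plugin that enables
# Kerberos typically drives this module along these lines:
#
#     from sahara.plugins import kerberos
#
#     if kerberos.is_kerberos_security_enabled(cluster):
#         kerberos.deploy_infrastructure(cluster, kdc_server)
#         kerberos.prepare_policy_files(cluster)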
from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils from sahara import conductor as cond from sahara import context from sahara import exceptions as exc from sahara.i18n import _ from sahara.plugins import provisioning as base from sahara.plugins import utils as pl_utils from sahara.service.castellan import utils as key_manager from sahara.utils import cluster as cl_utils from sahara.utils import cluster_progress_ops as cpo from sahara.utils import files conductor = cond.API LOG = logging.getLogger(__name__) CONF = cfg.CONF POLICY_FILES_DIR = '/tmp/UnlimitedPolicy' class KDCInstallationFailed(exc.SaharaException): code = 'KDC_INSTALL_FAILED' message_template = _('KDC installation failed by reason: {reason}') def __init__(self, reason): message = self.message_template.format(reason=reason) super(KDCInstallationFailed, self).__init__(message) def _config(**kwargs): return base.Config( applicable_target='Kerberos', priority=1, is_optional=True, scope='cluster', **kwargs) enable_kerberos = _config( name='Enable Kerberos Security', config_type='bool', default_value=False) use_existing_kdc = _config( name='Existing KDC', config_type='bool', default_value=False) kdc_server_ip = _config( name='Server IP of KDC', config_type='string', default_value='192.168.0.1', description=_('Server IP of KDC server when using existing KDC')) realm_name = _config( name='Realm Name', config_type='string', default_value='SAHARA-KDC', description=_('The name of realm to be used')) admin_principal = _config( name='Admin principal', config_type='string', default_value='sahara/admin', description=_('Admin principal for existing KDC server')) admin_password = _config( name='Admin password', config_type='string', default_value='') policy_url = _config( name="JCE libraries", config_type='string', default_value=('https://tarballs.openstack.org/sahara-extra/dist/' 'common-artifacts/'), description=_('Java Cryptography Extension (JCE) ' 'Unlimited Strength Jurisdiction Policy Files location') ) def get_config_list(): return [ enable_kerberos, use_existing_kdc, kdc_server_ip, realm_name, admin_principal, admin_password, policy_url, ] def get_kdc_host(cluster, server): if using_existing_kdc(cluster): return "server.%s" % CONF.node_domain return server.fqdn() def is_kerberos_security_enabled(cluster): return pl_utils.get_config_value_or_default( cluster=cluster, config=enable_kerberos) def using_existing_kdc(cluster): return pl_utils.get_config_value_or_default( cluster=cluster, config=use_existing_kdc) def get_kdc_server_ip(cluster): return pl_utils.get_config_value_or_default( cluster=cluster, config=kdc_server_ip) def get_realm_name(cluster): return pl_utils.get_config_value_or_default( cluster=cluster, config=realm_name) def get_admin_principal(cluster): return pl_utils.get_config_value_or_default( cluster=cluster, config=admin_principal) def get_admin_password(cluster): # TODO(vgridnev): support in follow-up improved secret storage for # configs return pl_utils.get_config_value_or_default( cluster=cluster, config=admin_password) def get_policy_url(cluster): return pl_utils.get_config_value_or_default( cluster=cluster, config=policy_url) def setup_clients(cluster, server=None, instances=None): if not instances: instances = cl_utils.get_instances(cluster) server_ip = None cpo.add_provisioning_step( cluster.id, _("Setting Up Kerberos clients"), len(instances)) if not server: server_ip = get_kdc_server_ip(cluster) with context.ThreadGroup() as tg: for instance in instances: 
tg.spawn('setup-client-%s' % instance.instance_name, _setup_client_node, cluster, instance, server, server_ip) def prepare_policy_files(cluster, instances=None): if instances is None: instances = pl_utils.get_instances(cluster) remote_url = get_policy_url(cluster) cpo.add_provisioning_step( cluster.id, _("Preparing policy files"), len(instances)) with context.ThreadGroup() as tg: for inst in instances: tg.spawn( 'policy-files', _prepare_policy_files, inst, remote_url) def deploy_infrastructure(cluster, server=None): if not is_kerberos_security_enabled(cluster): LOG.debug("Kerberos security disabled for cluster") return if not using_existing_kdc(cluster): deploy_kdc_server(cluster, server) setup_clients(cluster, server) def _execute_script(client, script): with client.remote() as remote: script_path = '/tmp/%s' % uuidutils.generate_uuid()[:8] remote.write_file_to(script_path, script) remote.execute_command('chmod +x %s' % script_path) remote.execute_command('bash %s' % script_path) remote.execute_command('rm -rf %s' % script_path) def _get_kdc_config(cluster, os): if os == "ubuntu": data = files.get_file_text('plugins/resources/kdc_conf') else: data = files.get_file_text('plugins/resources/kdc_conf_redhat') return data % { 'realm_name': get_realm_name(cluster) } def _get_krb5_config(cluster, server_fqdn): data = files.get_file_text('plugins/resources/krb5_config') return data % { 'realm_name': get_realm_name(cluster), 'server': server_fqdn, 'node_domain': CONF.node_domain, } def _get_short_uuid(): return "%s%s" % (uuidutils.generate_uuid()[:8], uuidutils.generate_uuid()[:8]) def get_server_password(cluster): if using_existing_kdc(cluster): return get_admin_password(cluster) ctx = context.ctx() cluster = conductor.cluster_get(ctx, cluster) extra = cluster.extra.to_dict() if cluster.extra else {} passwd_key = 'admin-passwd-kdc' if passwd_key not in extra: passwd = _get_short_uuid() key_id = key_manager.store_secret(passwd, ctx) extra[passwd_key] = key_id cluster = conductor.cluster_update(ctx, cluster, {'extra': extra}) passwd = key_manager.get_secret(extra.get(passwd_key), ctx) return passwd def _get_configs_dir(os): if os == "ubuntu": return "/etc/krb5kdc" return "/var/kerberos/krb5kdc" def _get_kdc_conf_path(os): return "%s/kdc.conf" % _get_configs_dir(os) def _get_realm_create_command(os): if os == 'ubuntu': return "krb5_newrealm" return "kdb5_util create -s" def _get_acl_config_path(os): return "%s/kadm5.acl" % _get_configs_dir(os) def _get_acl_config(): return "*/admin * " def _get_start_command(os, version): if os == "ubuntu": return ("sudo service krb5-kdc restart && " "sudo service krb5-admin-server restart") if version.startswith('6'): return ("sudo /etc/rc.d/init.d/krb5kdc start " "&& sudo /etc/rc.d/init.d/kadmin start") if version.startswith('7'): return ("sudo systemctl start krb5kdc &&" "sudo systemctl start kadmin") raise ValueError( _("Unable to get kdc server start command")) def _get_server_installation_script(cluster, server_fqdn, os, version): data = files.get_file_text( 'plugins/resources/mit-kdc-server-init.sh.template') return data % { 'kdc_conf': _get_kdc_config(cluster, os), 'kdc_conf_path': _get_kdc_conf_path(os), 'acl_conf': _get_acl_config(), 'acl_conf_path': _get_acl_config_path(os), 'realm_create': _get_realm_create_command(os), 'krb5_conf': _get_krb5_config(cluster, server_fqdn), 'admin_principal': get_admin_principal(cluster), 'password': get_server_password(cluster), 'os': os, 'start_command': _get_start_command(os, version), } @cpo.event_wrapper(True, 
step=_("Deploy KDC server"), param=('cluster', 0)) def deploy_kdc_server(cluster, server): with server.remote() as r: os = r.get_os_distrib() version = r.get_os_version() script = _get_server_installation_script( cluster, server.fqdn(), os, version) _execute_script(server, script) def _push_etc_hosts_entry(client, entry): with client.remote() as r: r.execute_command('echo %s | sudo tee -a /etc/hosts' % entry) def _get_client_installation_script(cluster, server_fqdn, os): data = files.get_file_text('plugins/resources/krb-client-init.sh.template') return data % { 'os': os, 'krb5_conf': _get_krb5_config(cluster, server_fqdn), } @cpo.event_wrapper(True, param=('client', 1)) def _setup_client_node(cluster, client, server=None, server_ip=None): if server: server_fqdn = server.fqdn() elif server_ip: server_fqdn = "server." % CONF.node_domain _push_etc_hosts_entry( client, "%s %s %s" % (server_ip, server_fqdn, server)) else: raise KDCInstallationFailed(_('Server or server ip are not provided')) with client.remote() as r: os = r.get_os_distrib() script = _get_client_installation_script(cluster, server_fqdn, os) _execute_script(client, script) @cpo.event_wrapper(True) def _prepare_policy_files(instance, remote_url): with instance.remote() as r: cmd = 'cut -f2 -d \"=\" /etc/profile.d/99-java.sh | head -1' exit_code, java_home = r.execute_command(cmd) java_home = java_home.strip() results = [ r.execute_command( "ls %s/local_policy.jar" % POLICY_FILES_DIR, raise_when_error=False)[0] != 0, r.execute_command( "ls %s/US_export_policy.jar" % POLICY_FILES_DIR, raise_when_error=False)[0] != 0 ] # a least one exit code is not zero if any(results): r.execute_command('mkdir %s' % POLICY_FILES_DIR) r.execute_command( "sudo curl %s/local_policy.jar -o %s/local_policy.jar" % ( remote_url, POLICY_FILES_DIR)) r.execute_command( "sudo curl %s/US_export_policy.jar -o " "%s/US_export_policy.jar" % ( remote_url, POLICY_FILES_DIR)) r.execute_command( 'sudo cp %s/*.jar %s/lib/security/' % (POLICY_FILES_DIR, java_home)) def _get_script_for_user_creation(cluster, instance, user): data = files.get_file_text( 'plugins/resources/create-principal-keytab') cron_file = files.get_file_text('plugins/resources/cron-file') cron_script = files.get_file_text('plugins/resources/cron-script') data = data % { 'user': user, 'admin_principal': get_admin_principal(cluster), 'admin_password': get_server_password(cluster), 'principal': "%s/sahara-%s@%s" % ( user, instance.fqdn(), get_realm_name(cluster)), 'keytab': '%s-sahara-%s.keytab' % (user, instance.fqdn()) } cron_script_location = '/tmp/sahara-kerberos/%s.sh' % _get_short_uuid() cron_file = cron_file % {'refresher': cron_script_location, 'user': user} cron_script = cron_script % { 'principal': "%s/sahara-%s@%s" % ( user, instance.fqdn(), get_realm_name(cluster)), 'keytab': '%s-sahara-%s.keytab' % (user, instance.fqdn()), 'user': user, } return data, cron_file, cron_script, cron_script_location def _create_keytabs_for_user(instance, user): script, cron, cron_script, cs_location = _get_script_for_user_creation( instance.cluster, instance, user) _execute_script(instance, script) # setting up refresher with instance.remote() as r: tmp_location = '/tmp/%s' % _get_short_uuid() r.write_file_to(tmp_location, cron_script, run_as_root=True) r.execute_command( "cat {0} | sudo tee {1} " "&& rm -rf {0} && sudo chmod +x {1}".format( tmp_location, cs_location)) r.execute_command( 'echo "%s" | sudo tee /etc/cron.d/%s.cron' % ( cron, _get_short_uuid())) # executing script r.execute_command('sudo bash %s' 
% cs_location) @cpo.event_wrapper( True, step=_('Setting up keytabs for users'), param=('cluster', 0)) def create_keytabs_for_map(cluster, mapper): # cluster parameter is used by event log feature with context.ThreadGroup() as tg: for user, instances in mapper.items(): for instance in instances: tg.spawn( 'create-keytabs', _create_keytabs_for_user, instance, user) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/labels.py0000664000175000017500000002011200000000000017610 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from oslo_log import log as logging from sahara import conductor as cond from sahara import context from sahara import exceptions as ex from sahara.i18n import _ conductor = cond.API LOG = logging.getLogger(__name__) STABLE = { 'name': 'stable', 'mutable': False, 'description': "Indicates that plugin or its version are stable to be used" } DEPRECATED = { 'name': 'deprecated', 'mutable': False, 'description': "Plugin or its version is deprecated and will be removed " "in future releases. Please, consider to use another " "plugin or its version." } ENABLED = { 'name': 'enabled', 'mutable': True, 'description': "Plugin or its version is enabled and can be used by user." } HIDDEN = { 'name': 'hidden', 'mutable': True, 'description': "Existence of plugin or its version is hidden, but " "still can be used for cluster creation by CLI and " "directly by client." 
} PLUGIN_LABELS_SCOPE = 'plugin_labels' VERSION_LABELS_SCOPE = 'version_labels' MUTABLE = 'mutable' LABEL_OBJECT = { 'type': 'object', 'properties': { 'status': { 'type': 'boolean', } }, "additionalProperties": False, } class LabelHandler(object): def __init__(self, loaded_plugins): self.plugins = loaded_plugins def get_plugin_update_validation_jsonschema(self): schema = { 'type': 'object', "additionalProperties": False, 'properties': { VERSION_LABELS_SCOPE: { 'type': 'object', 'additionalProperties': False, }, }, } ln = [label['name'] for label in self.get_labels()] labels_descr_object = { 'type': 'object', "properties": {name: copy.deepcopy(LABEL_OBJECT) for name in ln}, "additionalProperties": False } schema['properties'][PLUGIN_LABELS_SCOPE] = copy.deepcopy( labels_descr_object) all_versions = [] for plugin_name in self.plugins.keys(): plugin = self.plugins[plugin_name] all_versions.extend(plugin.get_versions()) all_versions = set(all_versions) schema['properties'][VERSION_LABELS_SCOPE]['properties'] = { ver: copy.deepcopy(labels_descr_object) for ver in all_versions } return schema def get_default_label_details(self, plugin_name): plugin = self.plugins.get(plugin_name) return plugin.get_labels() def get_label_details(self, plugin_name): try: plugin = conductor.plugin_get(context.ctx(), plugin_name) except Exception: LOG.error("Unable to retrieve plugin data from database") plugin = None if not plugin: plugin = self.get_default_label_details(plugin_name) fields = ['name', 'id', 'updated_at', 'created_at'] for field in fields: if field in plugin: del plugin[field] return plugin def get_label_full_details(self, plugin_name): return self.expand_data(self.get_label_details(plugin_name)) def get_labels(self): return [HIDDEN, STABLE, ENABLED, DEPRECATED] def get_labels_map(self): return { label['name']: label for label in self.get_labels() } def expand_data(self, plugin): plugin_labels = plugin.get(PLUGIN_LABELS_SCOPE) labels_map = self.get_labels_map() for key in plugin_labels.keys(): key_desc = labels_map.get(key) plugin_labels[key].update(key_desc) del plugin_labels[key]['name'] for version in plugin.get(VERSION_LABELS_SCOPE): vers_labels = plugin.get(VERSION_LABELS_SCOPE).get(version) for key in vers_labels.keys(): key_desc = labels_map.get(key) vers_labels[key].update(key_desc) del vers_labels[key]['name'] return plugin def _validate_labels_update(self, default_data, update_values): for label in update_values.keys(): if label not in default_data.keys(): raise ex.InvalidDataException( _("Label '%s' can't be updated because it's not " "available for plugin or its version") % label) if not default_data[label][MUTABLE]: raise ex.InvalidDataException( _("Label '%s' can't be updated because it's not " "mutable") % label) def validate_plugin_update(self, plugin_name, values): plugin = self.plugins[plugin_name] # it's important to get full details since we have mutability default = self.get_label_full_details(plugin_name) if values.get(PLUGIN_LABELS_SCOPE): pl = values.get(PLUGIN_LABELS_SCOPE) self._validate_labels_update(default[PLUGIN_LABELS_SCOPE], pl) if values.get(VERSION_LABELS_SCOPE): vl = values.get(VERSION_LABELS_SCOPE) for version in vl.keys(): if version not in plugin.get_versions(): raise ex.InvalidDataException( _("Unknown plugin version '%(version)s' of " "%(plugin)s") % { 'version': version, 'plugin': plugin_name}) self._validate_labels_update( default[VERSION_LABELS_SCOPE][version], vl[version]) def update_plugin(self, plugin_name, values): ctx = context.ctx() current = 
self.get_label_details(plugin_name) if not conductor.plugin_get(ctx, plugin_name): current['name'] = plugin_name conductor.plugin_create(ctx, current) del current['name'] if values.get(PLUGIN_LABELS_SCOPE): for label in values.get(PLUGIN_LABELS_SCOPE).keys(): current[PLUGIN_LABELS_SCOPE][label].update( values.get(PLUGIN_LABELS_SCOPE).get(label)) else: del current[PLUGIN_LABELS_SCOPE] if values.get(VERSION_LABELS_SCOPE): vl = values.get(VERSION_LABELS_SCOPE) for version in vl.keys(): for label in vl.get(version).keys(): current[VERSION_LABELS_SCOPE][version][label].update( vl[version][label]) else: del current[VERSION_LABELS_SCOPE] conductor.plugin_update(context.ctx(), plugin_name, current) def validate_plugin_labels(self, plugin_name, version): details = self.get_label_details(plugin_name) plb = details.get(PLUGIN_LABELS_SCOPE, {}) if not plb.get('enabled', {}).get('status'): raise ex.InvalidReferenceException( _("Plugin %s is not enabled") % plugin_name) if plb.get('deprecated', {}).get('status', False): LOG.warning("Plugin %s is deprecated and can be removed in " "the next release", plugin_name) vlb = details.get(VERSION_LABELS_SCOPE, {}).get(version, {}) if not vlb.get('enabled', {}).get('status'): raise ex.InvalidReferenceException( _("Version %(version)s of plugin %(plugin)s is not enabled") % {'version': version, 'plugin': plugin_name}) if vlb.get('deprecated', {}).get('status', False): LOG.warning("Using version %(version)s of plugin %(plugin)s is " "deprecated and can removed in next release", {'version': version, 'plugin': plugin_name}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/main.py0000664000175000017500000000142700000000000017302 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara import main def set_override(name, override, group, **kwargs): main.CONF.set_override(name, override, group) def clear_override(name, group, **kwargs): return main.CONF.clear_override ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/objects.py0000664000175000017500000000127600000000000020011 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
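# NOTE: minimal illustration of the helper below ('maybe_instance' is an
# assumed variable holding an arbitrary object):
#
#     from sahara.plugins import objects
#
#     if objects.is_object_instance(maybe_instance):
#         # safe to treat it as a conductor Instance, e.g. open a remote
#         with maybe_instance.remote() as r:
#             r.execute_command('hostname')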
from sahara.conductor import objects def is_object_instance(target): return isinstance(target, objects.Instance) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/opts.py0000664000175000017500000000164400000000000017344 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # File contains plugins opts to avoid cyclic imports issue from oslo_config import cfg opts = [ cfg.ListOpt('plugins', default=['vanilla', 'spark', 'cdh', 'ambari', 'storm', 'mapr'], help='List of plugins to be loaded. Sahara preserves the ' 'order of the list when returning it.'), ] CONF = cfg.CONF CONF.register_opts(opts) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/provisioning.py0000664000175000017500000003066500000000000021112 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
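# NOTE: a skeletal (purely hypothetical) plugin built on the interface below
# might look like the following; any additional methods required by
# PluginInterface itself are omitted here for brevity:
#
#     class MyPlugin(ProvisioningPluginBase):
#         def get_versions(self):
#             return ['1.0.0']
#
#         def get_configs(self, hadoop_version):
#             return []
#
#         def get_node_processes(self, hadoop_version):
#             return {'HDFS': ['namenode', 'datanode']}
#
#         def configure_cluster(self, cluster):
#             pass
#
#         def start_cluster(self, cluster):
#             pass
#
# Only the @plugins_base.required methods have to be overridden; the
# required_with_default and optional hooks below provide usable defaults.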
import copy from sahara import exceptions as ex from sahara.i18n import _ from sahara.plugins import base as plugins_base from sahara.utils import resources class ProvisioningPluginBase(plugins_base.PluginInterface): @plugins_base.required def get_versions(self): """Get available plugin versions :returns: A sequence of strings representing the versions For example: ["1.0.0", "1.0.1"] """ pass @plugins_base.required def get_configs(self, hadoop_version): """Get default configuration for a given plugin version :param hadoop_version: String representing a plugin version :returns: A dict containing the configuration """ pass @plugins_base.required_with_default def get_labels(self): versions = self.get_versions() default = {'enabled': {'status': True}} return { 'plugin_labels': copy.deepcopy(default), 'version_labels': { version: copy.deepcopy(default) for version in versions } } @plugins_base.required def get_node_processes(self, hadoop_version): """Get node processes of a given plugin version :param hadoop_version: String containing a plugin version :returns: A dict where the keys are the core components of the plugin and the value is a sequence of node processes for that component For example: { "HDFS": ["namenode", "datanode"], "Spark": ["master", "slave"] } """ pass @plugins_base.required_with_default def get_required_image_tags(self, hadoop_version): return [self.name, hadoop_version] @plugins_base.required_with_default def validate(self, cluster): pass @plugins_base.required_with_default def validate_scaling(self, cluster, existing, additional): pass @plugins_base.required_with_default def update_infra(self, cluster): pass @plugins_base.required def configure_cluster(self, cluster): pass @plugins_base.required def start_cluster(self, cluster): pass @plugins_base.optional def scale_cluster(self, cluster, instances): pass @plugins_base.optional def get_edp_engine(self, cluster, job_type): pass @plugins_base.optional def get_edp_job_types(self, versions=None): return {} @plugins_base.optional def get_edp_config_hints(self, job_type, version): return {} @plugins_base.required_with_default def get_open_ports(self, node_group): return [] @plugins_base.required_with_default def decommission_nodes(self, cluster, instances): pass @plugins_base.optional def get_image_arguments(self, hadoop_version): """Gets the argument set taken by the plugin's image generator Note: If the plugin can generate or validate an image but takes no arguments, please return an empty sequence rather than NotImplemented for all versions that support image generation or validation. This is used as a flag to determine whether the plugin has implemented this optional feature. :returns: A sequence with items of type sahara.plugins.images.ImageArgument """ return NotImplemented @plugins_base.optional def pack_image(self, hadoop_version, remote, test_only=False, image_arguments=None): """Packs an image for registration in Glance and use by Sahara :param remote: A remote (usually of type sahara.cli.image_pack.api.ImageRemote) that serves as a handle to the image to modify. Note that this image will be modified in-place, not copied. :param test_only: If set to True, this method will only test to ensure that the image already meets the plugin's requirements. This can be used to test images without modification. If set to False per the default, this method will modify the image if any requirements are not met. :param image_arguments: A dict of image argument name to argument value. 
:raises: sahara.plugins.exceptions.ImageValidationError: If the method fails to modify the image to specification (if test_only is False), or if the method finds that the image does not meet the specification (if test_only is True). :raises: sahara.plugins.exceptions.ImageValidationSpecificationError: If the specification for image generation or validation is itself in error and cannot be executed without repair. """ pass @plugins_base.optional def validate_images(self, cluster, test_only=False, image_arguments=None): """Validates the image to be used by a cluster. :param cluster: The object handle to a cluster which has active instances ready to generate remote handles. :param test_only: If set to True, this method will only test to ensure that the image already meets the plugin's requirements. This can be used to test images without modification. If set to False per the default, this method will modify the image if any requirements are not met. :param image_arguments: A dict of image argument name to argument value. :raises: sahara.plugins.exceptions.ImageValidationError: If the method fails to modify the image to specification (if test_only is False), or if the method finds that the image does not meet the specification (if test_only is True). :raises: sahara.plugins.exceptions.ImageValidationSpecificationError: If the specification for image generation or validation is itself in error and cannot be executed without repair. """ pass @plugins_base.required_with_default def on_terminate_cluster(self, cluster): pass @plugins_base.optional def recommend_configs(self, cluster, scaling=False): pass @plugins_base.required_with_default def get_health_checks(self, cluster): return [] def get_all_configs(self, hadoop_version): common = list_of_common_configs() plugin_specific_configs = self.get_configs(hadoop_version) if plugin_specific_configs: common.extend(plugin_specific_configs) return common def get_version_details(self, version): details = {} configs = self.get_all_configs(version) details['configs'] = [c.dict for c in configs] details['node_processes'] = self.get_node_processes(version) details['required_image_tags'] = self.get_required_image_tags(version) return details def to_dict(self): res = super(ProvisioningPluginBase, self).to_dict() res['versions'] = self.get_versions() return res # Some helpers for plugins def _map_to_user_inputs(self, hadoop_version, configs): config_objs = self.get_all_configs(hadoop_version) # convert config objects to applicable_target -> config_name -> obj config_objs_map = {} for config_obj in config_objs: applicable_target = config_obj.applicable_target confs = config_objs_map.get(applicable_target, {}) confs[config_obj.name] = config_obj config_objs_map[applicable_target] = confs # iterate over all configs and append UserInputs to result list result = [] for applicable_target in configs: for config_name in configs[applicable_target]: confs = config_objs_map.get(applicable_target) if not confs: raise ex.ConfigurationError( _("Can't find applicable target " "'%(applicable_target)s' for '%(config_name)s'") % {"applicable_target": applicable_target, "config_name": config_name}) conf = confs.get(config_name) if not conf: raise ex.ConfigurationError( _("Can't find config '%(config_name)s' " "in '%(applicable_target)s'") % {"config_name": config_name, "applicable_target": applicable_target}) result.append(UserInput( conf, configs[applicable_target][config_name])) return sorted(result) class Config(resources.BaseResource): """Describes a single config parameter. 
Config type - could be 'str', 'integer', 'boolean', 'enum'. If config type is 'enum' then list of valid values should be specified in config_values property. Priority - integer parameter which helps to differentiate all configurations in the UI. Priority decreases from the lower values to higher values. For example: "some_conf", "map_reduce", "node", is_optional=True """ def __init__(self, name, applicable_target, scope, config_type="string", config_values=None, default_value=None, is_optional=False, description=None, priority=2): self.name = name self.description = description self.config_type = config_type self.config_values = config_values self.default_value = default_value self.applicable_target = applicable_target self.scope = scope self.is_optional = is_optional self.priority = priority def to_dict(self): res = super(Config, self).to_dict() # TODO(slukjanov): all custom fields from res return res def __lt__(self, other): return self.name < other.name def __repr__(self): return '<Config %s in %s>' % (self.name, self.applicable_target) class UserInput(object): """Value provided by the user for a specific config entry.""" def __init__(self, config, value): self.config = config self.value = value def __eq__(self, other): return self.config == other.config and self.value == other.value def __lt__(self, other): return (self.config, self.value) < (other.config, other.value) def __repr__(self): return '<UserInput %s = %s>' % (self.config.name, self.value) class ValidationError(object): """Describes what is wrong with one of the values provided by user.""" def __init__(self, config, message): self.config = config self.message = message def __repr__(self): return "<ValidationError %s>" % self.config.name # COMMON FOR ALL PLUGINS CONFIGS XFS_ENABLED = Config( "Enable XFS", 'general', 'cluster', priority=1, default_value=True, config_type="bool", is_optional=True, description='Enables XFS for formatting' ) DISKS_PREPARING_TIMEOUT = Config( "Timeout for disk preparing", 'general', 'cluster', priority=1, default_value=300, config_type="int", is_optional=True, description='Timeout for preparing disks, formatting and mounting' ) NTP_URL = Config( "URL of NTP server", 'general', 'cluster', priority=1, default_value='', is_optional=True, description='URL of the NTP server for synchronization time on cluster' ' instances' ) NTP_ENABLED = Config( "Enable NTP service", 'general', 'cluster', priority=1, default_value=True, config_type="bool", description='Enables NTP service for synchronization time on cluster ' 'instances' ) HEAT_WAIT_CONDITION_TIMEOUT = Config( "Heat Wait Condition timeout", "general", "cluster", priority=1, config_type="int", default_value=3600, is_optional=True, description="The number of seconds to wait for the instance to boot") def list_of_common_configs(): return [DISKS_PREPARING_TIMEOUT, NTP_ENABLED, NTP_URL, HEAT_WAIT_CONDITION_TIMEOUT, XFS_ENABLED] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/recommendations_utils.py0000664000175000017500000003573600000000000022773 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License.
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc from oslo_log import log as logging import six from sahara import conductor as cond from sahara import context from sahara.utils.openstack import nova conductor = cond.API LOG = logging.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class AutoConfigsProvider(object): def __init__(self, mapper, plugin_configs, cluster, scaling): """This abstract class provides general recommendation utils for cluster configuration. :param mapper: dictionary that describes which cluster configs and node_configs to configure. It should map to the following dicts: node_configs to configure and cluster_configs to configure. These dicts should contain abstract config names as keys and tuples (correct_applicable_target, correct_name) as values. Such a representation allows the same AutoConfigsProvider to be reused for plugins with almost the same configs and configuration principles. :param plugin_configs: all plugin configs for the specified plugin :param cluster: the cluster to be configured :param scaling: indicates whether the current cluster operation is scaling """ self.plugin_configs = plugin_configs self.cluster = cluster self.node_configs_to_update = mapper.get('node_configs', {}) self.cluster_configs_to_update = mapper.get('cluster_configs', {}) self.scaling = scaling @abc.abstractmethod def _get_recommended_node_configs(self, node_group): """Calculates and returns recommended configs for node_group. It's not required to update node_configs of the node_group using the conductor api in this method, because that is done in apply_node_configs. :param node_group: NodeGroup Sahara resource. :return: dictionary with calculated recommended configs for node_group. """ pass @abc.abstractmethod def _get_recommended_cluster_configs(self): """Calculates and returns recommended configs for the cluster. It's not required to update cluster_configs of the cluster using the conductor api in this method, because that is done in apply_cluster_configs. :return: dictionary with calculated recommended configs for cluster. """ pass def _can_be_recommended(self, configs_list, node_group=None): """Returns True when it's possible to automatically configure the provided list of configs (configs_list); otherwise returns False.
:param configs_list: list of configs which we want to configure :param node_group: optional argument, which should be provided if some config can be used in node_configs of some node_group :return: True if all configs can be configured and False otherwise """ # cluster configs is Frozen Dict, so let's call to_dict() cl_configs = self.cluster.cluster_configs.to_dict() for ncfg in configs_list: section, name = self._get_correct_section_and_name(ncfg) if section in cl_configs and name in cl_configs[section]: return False if not node_group: return True cl_configs = node_group.node_configs.to_dict() for ncfg in configs_list: section, name = self._get_correct_section_and_name(ncfg) if section in cl_configs and name in cl_configs[section]: return False return True def _get_correct_section_and_name(self, config_name): """Calculates and returns correct applicable target and name from abstract name of config. :param config_name: abstract name of config. :return: correct applicable target and name for config_name """ section, name = None, None if config_name in self.cluster_configs_to_update: section = self.cluster_configs_to_update[config_name][0] name = self.cluster_configs_to_update[config_name][1] elif config_name in self.node_configs_to_update: section = self.node_configs_to_update[config_name][0] name = self.node_configs_to_update[config_name][1] return section, name def _get_default_config_value(self, config_name): """Calculates and return default value of config from abstract name of config. :param config_name: abstract name of config. :return: default config value for config_name. """ section, name = self._get_correct_section_and_name(config_name) for config in self.plugin_configs: if config.applicable_target == section and config.name == name: return config.default_value def _merge_configs(self, current_configs, proposed_configs): """Correctly merges old configs and new extra configs""" result = {} for (section, configs) in six.iteritems(proposed_configs): cfg_values = {} if section in current_configs: cfg_values = (current_configs[section] if current_configs[section] else {}) cfg_values.update(configs) result.update({section: cfg_values}) for (section, configs) in six.iteritems(current_configs): if section not in result: result.update({section: configs}) return result def _get_cluster_extra(self): cluster = self.cluster return cluster.extra.to_dict() if cluster.extra else {} def finalize_autoconfiguration(self): if not self.cluster.use_autoconfig: return cluster_extra = self._get_cluster_extra() cluster_extra['auto-configured'] = True conductor.cluster_update( context.ctx(), self.cluster, {'extra': cluster_extra}) def apply_node_configs(self, node_group): """Method applies configs for node_group using conductor api, which were calculated with recommend_node_configs method. :param node_group: NodeGroup Sahara resource. :return: None. 
""" if not node_group.use_autoconfig or not self.cluster.use_autoconfig: return to_update = self.node_configs_to_update recommended_node_configs = self._get_recommended_node_configs( node_group) if not recommended_node_configs: # Nothing to configure return current_dict = node_group.node_configs.to_dict() configuration = {} for ncfg in six.iterkeys(to_update): if ncfg not in recommended_node_configs: continue n_section = to_update[ncfg][0] n_name = to_update[ncfg][1] proposed_config_value = recommended_node_configs[ncfg] if n_section not in configuration: configuration.update({n_section: {}}) configuration[n_section].update({n_name: proposed_config_value}) current_dict = self._merge_configs(current_dict, configuration) conductor.node_group_update(context.ctx(), node_group, {'node_configs': current_dict}) def apply_cluster_configs(self): """Method applies configs for cluster using conductor api, which were calculated with recommend_cluster_configs method. :return: None. """ cluster = self.cluster if not cluster.use_autoconfig: return to_update = self.cluster_configs_to_update recommended_cluster_configs = self._get_recommended_cluster_configs() if not recommended_cluster_configs: # Nothing to configure return current_dict = cluster.cluster_configs.to_dict() configuration = {} for ncfg in six.iterkeys(to_update): if ncfg not in recommended_cluster_configs: continue n_section = to_update[ncfg][0] n_name = to_update[ncfg][1] proposed_config_value = recommended_cluster_configs[ncfg] if n_section not in configuration: configuration.update({n_section: {}}) configuration[n_section].update({n_name: proposed_config_value}) current_dict = self._merge_configs(current_dict, configuration) conductor.cluster_update(context.ctx(), cluster, {'cluster_configs': current_dict}) def apply_recommended_configs(self): """Method applies recommended configs for cluster and for all node_groups using conductor api. :return: None. """ if self.scaling: # Validate cluster is not an old created cluster cluster_extra = self._get_cluster_extra() if 'auto-configured' not in cluster_extra: # Don't configure return for ng in self.cluster.node_groups: self.apply_node_configs(ng) self.apply_cluster_configs() configs = list(self.cluster_configs_to_update.keys()) configs.extend(list(self.node_configs_to_update.keys())) LOG.debug("Following configs were auto-configured: {configs}".format( configs=configs)) self.finalize_autoconfiguration() class HadoopAutoConfigsProvider(AutoConfigsProvider): def __init__(self, mapper, plugin_configs, cluster, scaling, hbase=False): super(HadoopAutoConfigsProvider, self).__init__( mapper, plugin_configs, cluster, scaling) self.requested_flavors = {} self.is_hbase_enabled = hbase def _get_java_opts(self, value): return "-Xmx%dm" % int(value) def _transform_mb_to_gb(self, mb): return mb / 1024. def _transform_gb_to_mb(self, gb): return gb * 1024. 
def _get_min_size_of_container(self, ram): if ram <= 4: return 256 if ram <= 8: return 512 if ram <= 24: return 1024 return 2048 def _get_os_ram_recommendation(self, ram): upper_bounds = [4, 8, 16, 24, 48, 64, 72, 96, 128, 256] reserve_for_os = [1, 2, 2, 4, 6, 8, 8, 12, 24, 32] for (upper, reserve) in zip(upper_bounds, reserve_for_os): if ram <= upper: return reserve return 64 def _get_hbase_ram_recommendations(self, ram): if not self.is_hbase_enabled: return 0 upper_bounds = [4, 8, 16, 24, 48, 64, 72, 96, 128, 256] reserve_for_hbase = [1, 1, 2, 4, 8, 8, 8, 16, 24, 32] for (upper, reserve) in zip(upper_bounds, reserve_for_hbase): if ram <= upper: return reserve return 64 def _get_node_group_data(self, node_group): if node_group.flavor_id not in self.requested_flavors: flavor = nova.get_flavor(id=node_group.flavor_id) self.requested_flavors[node_group.flavor_id] = flavor else: flavor = self.requested_flavors[node_group.flavor_id] cpu = flavor.vcpus ram = flavor.ram data = {} # config recommendations was taken from Ambari code os = self._get_os_ram_recommendation(self._transform_mb_to_gb(ram)) hbase = self._get_hbase_ram_recommendations( self._transform_mb_to_gb(ram)) reserved_ram = self._transform_gb_to_mb(os + hbase) min_container_size = self._get_min_size_of_container( self._transform_mb_to_gb(ram)) # we use large amount of containers to allow users to run # at least 4 jobs at same time on clusters based on small flavors data["containers"] = int(max( 8, min(2 * cpu, ram / min_container_size))) data["ramPerContainer"] = (ram - reserved_ram) / data["containers"] data["ramPerContainer"] = max(data["ramPerContainer"], min_container_size) data["ramPerContainer"] = min(2048, int(data["ramPerContainer"])) data["ramPerContainer"] = int(data["ramPerContainer"]) data["mapMemory"] = int(data["ramPerContainer"]) data["reduceMemory"] = int(2 * data["ramPerContainer"]) data["amMemory"] = int(min(data["mapMemory"], data["reduceMemory"])) return data def _get_recommended_node_configs(self, node_group): """Calculates recommended MapReduce and YARN configs for specified node_group. :param node_group: NodeGroup Sahara resource :return: dictionary with recommended MapReduce and YARN configs """ configs_to_update = list(self.node_configs_to_update.keys()) if not self._can_be_recommended(configs_to_update, node_group): return {} data = self._get_node_group_data(node_group) r = {} r['yarn.nodemanager.resource.memory-mb'] = (data['containers'] * data['ramPerContainer']) r['yarn.scheduler.minimum-allocation-mb'] = data['ramPerContainer'] r['yarn.scheduler.maximum-allocation-mb'] = (data['containers'] * data['ramPerContainer']) r['yarn.nodemanager.vmem-check-enabled'] = "false" r['yarn.app.mapreduce.am.resource.mb'] = data['amMemory'] r['yarn.app.mapreduce.am.command-opts'] = self._get_java_opts( 0.8 * data['amMemory']) r['mapreduce.map.memory.mb'] = data['mapMemory'] r['mapreduce.reduce.memory.mb'] = data['reduceMemory'] r['mapreduce.map.java.opts'] = self._get_java_opts( 0.8 * data['mapMemory']) r['mapreduce.reduce.java.opts'] = self._get_java_opts( 0.8 * data['reduceMemory']) r['mapreduce.task.io.sort.mb'] = int(min( 0.4 * data['mapMemory'], 1024)) return r def get_datanode_name(self): return "datanode" def _get_recommended_cluster_configs(self): """Method recommends dfs_replication for cluster. :return: recommended value of dfs_replication. 
""" if not self._can_be_recommended(['dfs.replication']): return {} datanode_count = 0 datanode_proc_name = self.get_datanode_name() for ng in self.cluster.node_groups: if datanode_proc_name in ng.node_processes: datanode_count += ng.count replica = 'dfs.replication' recommended_value = self._get_default_config_value(replica) if recommended_value: return {replica: min(recommended_value, datanode_count)} else: return {} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/resource.py0000664000175000017500000000171500000000000020205 0ustar00zuulzuul00000000000000# Copyright (c) 2018 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.conductor import resource def is_resource_instance(target, **kwargs): return isinstance(target, resource.Resource) def create_node_group_resource(data, **kwargs): return resource.NodeGroupResource(data) def create_cluster_resource(data, **kwargs): return resource.ClusterResource(data) def create_resource(data, **kwargs): return resource.Resource(data) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.725891 sahara-16.0.0/sahara/plugins/resources/0000775000175000017500000000000000000000000020012 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/plugins/resources/create-principal-keytab0000664000175000017500000000040000000000000024426 0ustar00zuulzuul00000000000000#!/bin/bash mkdir -p /tmp/sahara-kerberos/ kadmin -p %(admin_principal)s < 1: raise ex.InvalidComponentCountException( node_process, _('0 or 1'), len(instances)) return instances[0] if instances else None def generate_host_names(nodes, **kwargs): return "\n".join([n.hostname() for n in nodes]) def generate_fqdn_host_names(nodes, **kwargs): return "\n".join([n.fqdn() for n in nodes]) def get_port_from_address(address, **kwargs): parse_result = urlparse.urlparse(address) # urlparse do not parse values like 0.0.0.0:8000, # netutils do not parse values like http://localhost:8000, # so combine approach is using if parse_result.port: return parse_result.port else: return netutils.parse_host_port(address)[1] def instances_with_services(instances, node_processes, **kwargs): node_processes = set(node_processes) return list(filter( lambda x: node_processes.intersection( x.node_group.node_processes), instances)) def start_process_event_message(process, **kwargs): return _("Start the following process(es): {process}").format( process=process) def get_config_value_or_default( service=None, name=None, cluster=None, config=None, **kwargs): if not config: if not service or not name: raise RuntimeError(_("Unable to retrieve config details")) default_value = None else: service = config.applicable_target name = config.name default_value = config.default_value cluster_configs = cluster.cluster_configs if cluster_configs.get(service, {}).get(name, None) is not None: return 
cluster_configs.get(service, {}).get(name, None) # Try getting config from the cluster. for ng in cluster.node_groups: if (ng.configuration().get(service) and ng.configuration()[service].get(name)): return ng.configuration()[service][name] # Find and return the default if default_value is not None: return default_value plugin = plugins_base.PLUGINS.get_plugin(cluster.plugin_name) configs = plugin.get_all_configs(cluster.hadoop_version) for config in configs: if config.applicable_target == service and config.name == name: return config.default_value raise RuntimeError(_("Unable to get parameter '%(param_name)s' from " "service %(service)s"), {'param_name': name, 'service': service}) def cluster_get_instances(cluster, instances_ids=None, **kwargs): return cluster_utils.get_instances(cluster, instances_ids) def check_cluster_exists(cluster, **kwargs): return cluster_utils.check_cluster_exists(cluster) def add_provisioning_step(cluster_id, step_name, total, **kwargs): return ops.add_provisioning_step(cluster_id, step_name, total) def add_successful_event(instance, **kwargs): ops.add_successful_event(instance) def add_fail_event(instance, exception, **kwargs): ops.add_fail_event(instance, exception) def merge_configs(config_a, config_b, **kwargs): return sahara_configs.merge_configs(config_a, config_b) def generate_key_pair(key_length=2048, **kwargs): return crypto.generate_key_pair(key_length) def get_file_text(file_name, package='sahara', **kwargs): return files.get_file_text(file_name, package) def try_get_file_text(file_name, package='sahara', **kwargs): return files.try_get_file_text(file_name, package) def get_by_id(lst, id, **kwargs): return general.get_by_id(lst, id) def natural_sort_key(s, **kwargs): return general.natural_sort_key(s) def get_flavor(**kwargs): return nova.get_flavor(**kwargs) def poll(get_status, kwargs=None, args=None, operation_name=None, timeout_name=None, timeout=poll_utils.DEFAULT_TIMEOUT, sleep=poll_utils.DEFAULT_SLEEP_TIME, exception_strategy='raise'): poll_utils.poll(get_status, kwargs=kwargs, args=args, operation_name=operation_name, timeout_name=timeout_name, timeout=timeout, sleep=sleep, exception_strategy=exception_strategy) def plugin_option_poll(cluster, get_status, option, operation_name, sleep_time, kwargs): poll_utils.plugin_option_poll(cluster, get_status, option, operation_name, sleep_time, kwargs) def create_proxy_user_for_cluster(cluster, **kwargs): return proxy.create_proxy_user_for_cluster(cluster) def get_remote(instance, **kwargs): return remote.get_remote(instance) def rpc_setup(service_name, **kwargs): rpc.setup(service_name) def transform_to_num(s, **kwargs): return types.transform_to_num(s) def is_int(s, **kwargs): return types.is_int(s) def parse_hadoop_xml_with_name_and_value(data, **kwargs): return xmlutils.parse_hadoop_xml_with_name_and_value(data) def create_hadoop_xml(configs, config_filter=None, **kwargs): return xmlutils.create_hadoop_xml(configs, config_filter) def create_elements_xml(configs, **kwargs): return xmlutils.create_elements_xml(configs) def load_hadoop_xml_defaults(file_name, package, **kwargs): return xmlutils.load_hadoop_xml_defaults(file_name, package) def get_property_dict(elem, **kwargs): return xmlutils.get_property_dict(elem) class PluginsApiValidator(api_validator.ApiValidator): def __init__(self, schema, **kwargs): super(PluginsApiValidator, self).__init__(schema) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.725891 
sahara-16.0.0/sahara/service/0000775000175000017500000000000000000000000015757 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/__init__.py0000664000175000017500000000000000000000000020056 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.725891 sahara-16.0.0/sahara/service/api/0000775000175000017500000000000000000000000016530 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/api/__init__.py0000664000175000017500000000120700000000000020641 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. OPS = None def setup_api(ops): global OPS OPS = ops ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/api/v10.py0000664000175000017500000002162700000000000017520 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
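# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only.  The api modules in this package
# depend on the module-level OPS object injected through setup_api() in
# sahara/service/api/__init__.py above; calls such as
# api.OPS.provision_cluster(cluster.id) below are dispatched to it.  A
# hypothetical wiring (the _NoopOps class is invented for illustration):
#
#     from sahara.service import api
#
#     class _NoopOps(object):
#         def provision_cluster(self, cluster_id):
#             print("would provision cluster", cluster_id)
#
#     api.setup_api(_NoopOps())
#
# In the running service the injected object is expected to come from
# sahara.service.ops and to forward these calls to the provisioning engine.
# ---------------------------------------------------------------------------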
from oslo_config import cfg from oslo_utils import excutils import six from sahara import conductor as c from sahara import context from sahara.plugins import base as plugin_base from sahara.service import api from sahara.service.health import verification_base from sahara.service import quotas from sahara.utils import cluster as c_u from sahara.utils import general as g from sahara.utils.notification import sender from sahara.utils.openstack import base as b from sahara.utils.openstack import images as sahara_images conductor = c.API CONF = cfg.CONF # Cluster ops def get_clusters(**kwargs): return conductor.cluster_get_all(context.ctx(), regex_search=True, **kwargs) def get_cluster(id, show_progress=False): return conductor.cluster_get(context.ctx(), id, show_progress) def scale_cluster(id, data): context.set_current_cluster_id(id) ctx = context.ctx() cluster = conductor.cluster_get(ctx, id) plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name) existing_node_groups = data.get('resize_node_groups', []) additional_node_groups = data.get('add_node_groups', []) # the next map is the main object we will work with # to_be_enlarged : {node_group_id: desired_amount_of_instances} to_be_enlarged = {} for ng in existing_node_groups: ng_id = g.find(cluster.node_groups, name=ng['name'])['id'] to_be_enlarged.update({ng_id: ng['count']}) additional = construct_ngs_for_scaling(cluster, additional_node_groups) cluster = conductor.cluster_get(ctx, cluster) _add_ports_for_auto_sg(ctx, cluster, plugin) try: cluster = c_u.change_cluster_status( cluster, c_u.CLUSTER_STATUS_VALIDATING) quotas.check_scaling(cluster, to_be_enlarged, additional) plugin.recommend_configs(cluster, scaling=True) plugin.validate_scaling(cluster, to_be_enlarged, additional) except Exception as e: with excutils.save_and_reraise_exception(): c_u.clean_cluster_from_empty_ng(cluster) c_u.change_cluster_status( cluster, c_u.CLUSTER_STATUS_ACTIVE, six.text_type(e)) # If we are here validation is successful. 
# So let's update to_be_enlarged map: to_be_enlarged.update(additional) for node_group in cluster.node_groups: if node_group.id not in to_be_enlarged: to_be_enlarged[node_group.id] = node_group.count api.OPS.provision_scaled_cluster(id, to_be_enlarged) return cluster def create_cluster(values): plugin = plugin_base.PLUGINS.get_plugin(values['plugin_name']) return _cluster_create(values, plugin) def create_multiple_clusters(values): num_of_clusters = values['count'] clusters = [] plugin = plugin_base.PLUGINS.get_plugin(values['plugin_name']) for counter in range(num_of_clusters): cluster_dict = values.copy() cluster_name = cluster_dict['name'] cluster_dict['name'] = get_multiple_cluster_name(num_of_clusters, cluster_name, counter + 1) cluster = _cluster_create(cluster_dict, plugin) clusters.append(cluster.id) clusters_dict = {'clusters': clusters} return clusters_dict def _cluster_create(values, plugin): ctx = context.ctx() cluster = conductor.cluster_create(ctx, values) context.set_current_cluster_id(cluster.id) sender.status_notify(cluster.id, cluster.name, "New", "create") _add_ports_for_auto_sg(ctx, cluster, plugin) # validating cluster try: plugin.recommend_configs(cluster) cluster = c_u.change_cluster_status( cluster, c_u.CLUSTER_STATUS_VALIDATING) plugin.validate(cluster) quotas.check_cluster(cluster) except Exception as e: with excutils.save_and_reraise_exception(): c_u.change_cluster_status( cluster, c_u.CLUSTER_STATUS_ERROR, six.text_type(e)) api.OPS.provision_cluster(cluster.id) return cluster def get_multiple_cluster_name(num_of_clusters, name, counter): return "%%s-%%0%dd" % len(str(num_of_clusters)) % (name, counter) def _add_ports_for_auto_sg(ctx, cluster, plugin): for ng in cluster.node_groups: if ng.auto_security_group: ports = {'open_ports': plugin.get_open_ports(ng)} conductor.node_group_update(ctx, ng, ports) def terminate_cluster(id): context.set_current_cluster_id(id) cluster = c_u.change_cluster_status(id, c_u.CLUSTER_STATUS_DELETING) if cluster is None: return api.OPS.terminate_cluster(id) sender.status_notify(cluster.id, cluster.name, cluster.status, "delete") def update_cluster(id, values): if verification_base.update_verification_required(values): api.OPS.handle_verification(id, values) return conductor.cluster_get(context.ctx(), id) return conductor.cluster_update(context.ctx(), id, values) # ClusterTemplate ops def get_cluster_templates(**kwargs): return conductor.cluster_template_get_all(context.ctx(), regex_search=True, **kwargs) def get_cluster_template(id): return conductor.cluster_template_get(context.ctx(), id) def create_cluster_template(values): return conductor.cluster_template_create(context.ctx(), values) def terminate_cluster_template(id): return conductor.cluster_template_destroy(context.ctx(), id) def update_cluster_template(id, values): return conductor.cluster_template_update(context.ctx(), id, values) # NodeGroupTemplate ops def get_node_group_templates(**kwargs): return conductor.node_group_template_get_all(context.ctx(), regex_search=True, **kwargs) def get_node_group_template(id): return conductor.node_group_template_get(context.ctx(), id) def create_node_group_template(values): return conductor.node_group_template_create(context.ctx(), values) def terminate_node_group_template(id): return conductor.node_group_template_destroy(context.ctx(), id) def update_node_group_template(id, values): return conductor.node_group_template_update(context.ctx(), id, values) def export_node_group_template(id): return 
conductor.node_group_template_get(context.ctx(), id) # Plugins ops def get_plugins(): return plugin_base.PLUGINS.get_plugins(serialized=True) def get_plugin(plugin_name, version=None): return plugin_base.PLUGINS.serialize_plugin(plugin_name, version) def update_plugin(plugin_name, values): return plugin_base.PLUGINS.update_plugin(plugin_name, values) def construct_ngs_for_scaling(cluster, additional_node_groups): ctx = context.ctx() additional = {} for ng in additional_node_groups: count = ng['count'] ng['count'] = 0 ng_id = conductor.node_group_add(ctx, cluster, ng) additional.update({ng_id: count}) return additional # Image Registry def get_images(name, tags): return b.execute_with_retries( sahara_images.image_manager().list_registered, name, tags) def get_image(**kwargs): if len(kwargs) == 1 and 'id' in kwargs: return b.execute_with_retries( sahara_images.image_manager().get, kwargs['id']) else: return b.execute_with_retries( sahara_images.image_manager().find, **kwargs) def get_registered_image(image_id): return b.execute_with_retries( sahara_images.image_manager().get_registered_image, image_id) def register_image(image_id, username, description=None): manager = sahara_images.image_manager() b.execute_with_retries( manager.set_image_info, image_id, username, description) return b.execute_with_retries(manager.get, image_id) def unregister_image(image_id): manager = sahara_images.image_manager() b.execute_with_retries(manager.unset_image_info, image_id) return b.execute_with_retries(manager.get, image_id) def add_image_tags(image_id, tags): manager = sahara_images.image_manager() b.execute_with_retries(manager.tag, image_id, tags) return b.execute_with_retries(manager.get, image_id) def remove_image_tags(image_id, tags): manager = sahara_images.image_manager() b.execute_with_retries(manager.untag, image_id, tags) return b.execute_with_retries(manager.get, image_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/api/v11.py0000664000175000017500000002022400000000000017511 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_log import log as logging import six from sahara import conductor as c from sahara import context from sahara import exceptions as ex from sahara.plugins import base as plugin_base from sahara.service import api from sahara.service.edp.binary_retrievers import dispatch from sahara.service.edp import job_manager as manager from sahara.utils import edp from sahara.utils import proxy as p conductor = c.API LOG = logging.getLogger(__name__) CONF = cfg.CONF def get_job_types(**kwargs): # Return a dictionary of all the job types that can be run # by this instance of Sahara. For each job type, the value # will be a list of plugins that support the job type. For # each plugin, include a dictionary of the versions that # support the job type. 
# All entries in kwargs are expected to have list values hints = kwargs.get("hints", ["false"])[0].lower() == "true" plugin_names = kwargs.get("plugin", []) all_plugins = plugin_base.PLUGINS.get_plugins() if plugin_names: plugins = filter(lambda x: x.name in plugin_names, all_plugins) else: plugins = all_plugins job_types = kwargs.get("type", edp.JOB_TYPES_ALL) versions = kwargs.get("version", []) res = [] for job_type in job_types: # All job types supported by all versions of the plugin. # This is a dictionary where keys are plugin version # strings and values are lists of job types job_entry = {"name": job_type, "plugins": []} for plugin in plugins: types_for_plugin = plugin.get_edp_job_types(versions) # dict returns a new object so we are not modifying the plugin p = plugin.dict # Find only the versions of this plugin that support the job. # Additionally, instead of a list we want a dictionary of # plugin versions with corresponding config hints p["versions"] = {} for version, supported_types in six.iteritems(types_for_plugin): if job_type in supported_types: if hints: config_hints = plugin.get_edp_config_hints(job_type, version) else: config_hints = {} p["versions"][version] = config_hints # If we found at least one version of the plugin that # supports the job type, add the plugin to the result if p["versions"]: job_entry["plugins"].append(p) if job_entry["plugins"]: res.append(job_entry) return res def get_job_config_hints(job_type): return manager.get_job_config_hints(job_type) def execute_job(job_id, data): # Elements common to all job types cluster_id = data['cluster_id'] configs = data.get('job_configs', {}) interface = data.get('interface', {}) # Not in Java job types but present for all others input_id = data.get('input_id', None) output_id = data.get('output_id', None) # Since we will use a unified class in the database, we pass # a superset for all job types # example configs['start'] = '2015-05-12T08:55Z' frequency = 5 mins # the job will starts from 2015-05-12T08:55Z, runs every 5 mins job_execution_info = data.get('job_execution_info', {}) configs['job_execution_info'] = job_execution_info job_ex_dict = {'input_id': input_id, 'output_id': output_id, 'job_id': job_id, 'cluster_id': cluster_id, 'info': {'status': edp.JOB_STATUS_PENDING}, 'job_configs': configs, 'extra': {}, 'interface': interface} job_execution = conductor.job_execution_create(context.ctx(), job_ex_dict) context.set_current_job_execution_id(job_execution.id) # check to use proxy user if p.job_execution_requires_proxy_user(job_execution): try: p.create_proxy_user_for_job_execution(job_execution) except ex.SaharaException as e: LOG.error("Can't run job execution. 
" "(Reasons: {reason})".format(reason=e)) conductor.job_execution_destroy(context.ctx(), job_execution) raise api.OPS.run_edp_job(job_execution.id) return job_execution def get_job_execution_status(id): return manager.get_job_status(id) def job_execution_list(**kwargs): return conductor.job_execution_get_all(context.ctx(), regex_search=True, **kwargs) def get_job_execution(id): return conductor.job_execution_get(context.ctx(), id) def cancel_job_execution(id): context.set_current_job_execution_id(id) job_execution = conductor.job_execution_get(context.ctx(), id) api.OPS.cancel_job_execution(id) return job_execution def update_job_execution(id, values): _update_status(values.pop("info", None), id) return conductor.job_execution_update(context.ctx(), id, values) def _update_status(info, id): if info: status = info.get("status", None) if status == edp.JOB_ACTION_SUSPEND: api.OPS.job_execution_suspend(id) def delete_job_execution(id): context.set_current_job_execution_id(id) api.OPS.delete_job_execution(id) def get_data_sources(**kwargs): return conductor.data_source_get_all(context.ctx(), regex_search=True, **kwargs) def get_data_source(id): return conductor.data_source_get(context.ctx(), id) def delete_data_source(id): conductor.data_source_destroy(context.ctx(), id) def register_data_source(values): return conductor.data_source_create(context.ctx(), values) def data_source_update(id, values): return conductor.data_source_update(context.ctx(), id, values) def get_jobs(**kwargs): return conductor.job_get_all(context.ctx(), regex_search=True, **kwargs) def get_job(id): return conductor.job_get(context.ctx(), id) def create_job(values): return conductor.job_create(context.ctx(), values) def update_job(id, values): return conductor.job_update(context.ctx(), id, values) def delete_job(job_id): return conductor.job_destroy(context.ctx(), job_id) def create_job_binary(values): return conductor.job_binary_create(context.ctx(), values) def get_job_binaries(**kwargs): return conductor.job_binary_get_all(context.ctx(), regex_search=True, **kwargs) def get_job_binary(id): return conductor.job_binary_get(context.ctx(), id) def update_job_binary(id, values): return conductor.job_binary_update(context.ctx(), id, values) def delete_job_binary(id): conductor.job_binary_destroy(context.ctx(), id) def create_job_binary_internal(values): return conductor.job_binary_internal_create(context.ctx(), values) def get_job_binary_internals(**kwargs): return conductor.job_binary_internal_get_all(context.ctx(), regex_search=True, **kwargs) def get_job_binary_internal(id): return conductor.job_binary_internal_get(context.ctx(), id) def delete_job_binary_internal(id): conductor.job_binary_internal_destroy(context.ctx(), id) def get_job_binary_internal_data(id): return conductor.job_binary_internal_get_raw_data(context.ctx(), id) def update_job_binary_internal(id, values): return conductor.job_binary_internal_update(context.ctx(), id, values) def get_job_binary_data(id): job_binary = conductor.job_binary_get(context.ctx(), id) return dispatch.get_raw_binary(job_binary, with_context=True) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.729891 sahara-16.0.0/sahara/service/api/v2/0000775000175000017500000000000000000000000017057 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/api/v2/__init__.py0000664000175000017500000000000000000000000021156 
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/api/v2/cluster_templates.py0000664000175000017500000000254000000000000023171 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara import conductor as c from sahara import context conductor = c.API # ClusterTemplate ops def get_cluster_templates(**kwargs): return conductor.cluster_template_get_all(context.ctx(), regex_search=True, **kwargs) def get_cluster_template(id): return conductor.cluster_template_get(context.ctx(), id) def create_cluster_template(values): return conductor.cluster_template_create(context.ctx(), values) def terminate_cluster_template(id): return conductor.cluster_template_destroy(context.ctx(), id) def update_cluster_template(id, values): return conductor.cluster_template_update(context.ctx(), id, values) def export_cluster_template(id): return conductor.cluster_template_get(context.ctx(), id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/api/v2/clusters.py0000664000175000017500000001413300000000000021277 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
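# ---------------------------------------------------------------------------
# Editor's note: illustrative sketch only.  scale_cluster() in this module
# accepts a body with 'resize_node_groups' and 'add_node_groups' lists; in
# APIv2 a resized group may also carry an 'instances' list, which is forwarded
# to the engine as node_group_instance_map.  Group names, counts and the
# instance id below are hypothetical:
_example_scale_request = {
    'resize_node_groups': [
        {'name': 'workers', 'count': 3,
         'instances': ['0d9b1e62-example-instance-id']},
    ],
    'add_node_groups': [
        {'name': 'extra-workers', 'count': 2},  # other NG fields omitted
    ],
}
# ---------------------------------------------------------------------------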
from oslo_utils import excutils import six from sahara import conductor as c from sahara import context from sahara.plugins import base as plugin_base from sahara.service import api from sahara.service.health import verification_base from sahara.service import quotas from sahara.utils import cluster as c_u from sahara.utils import general as g from sahara.utils.notification import sender conductor = c.API # Cluster ops def get_clusters(**kwargs): return conductor.cluster_get_all(context.ctx(), regex_search=True, **kwargs) def get_cluster(id, show_progress=False): return conductor.cluster_get(context.ctx(), id, show_progress) def scale_cluster(id, data): context.set_current_cluster_id(id) ctx = context.ctx() cluster = conductor.cluster_get(ctx, id) plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name) existing_node_groups = data.get('resize_node_groups', []) additional_node_groups = data.get('add_node_groups', []) # the next map is the main object we will work with # to_be_enlarged : {node_group_id: desired_amount_of_instances} to_be_enlarged = {} node_group_instance_map = {} for ng in existing_node_groups: ng_id = g.find(cluster.node_groups, name=ng['name'])['id'] to_be_enlarged.update({ng_id: ng['count']}) if 'instances' in ng: node_group_instance_map.update({ng_id: ng['instances']}) additional = construct_ngs_for_scaling(cluster, additional_node_groups) cluster = conductor.cluster_get(ctx, cluster) _add_ports_for_auto_sg(ctx, cluster, plugin) try: cluster = c_u.change_cluster_status( cluster, c_u.CLUSTER_STATUS_VALIDATING) quotas.check_scaling(cluster, to_be_enlarged, additional) plugin.recommend_configs(cluster, scaling=True) plugin.validate_scaling(cluster, to_be_enlarged, additional) except Exception as e: with excutils.save_and_reraise_exception(): c_u.clean_cluster_from_empty_ng(cluster) c_u.change_cluster_status( cluster, c_u.CLUSTER_STATUS_ACTIVE, six.text_type(e)) # If we are here validation is successful. 
# So let's update to_be_enlarged map: to_be_enlarged.update(additional) for node_group in cluster.node_groups: if node_group.id not in to_be_enlarged: to_be_enlarged[node_group.id] = node_group.count api.OPS.provision_scaled_cluster(id, to_be_enlarged, node_group_instance_map) return cluster def create_cluster(values): plugin = plugin_base.PLUGINS.get_plugin(values['plugin_name']) return _cluster_create(values, plugin) def create_multiple_clusters(values): num_of_clusters = values['count'] clusters = [] plugin = plugin_base.PLUGINS.get_plugin(values['plugin_name']) for counter in range(num_of_clusters): cluster_dict = values.copy() cluster_name = cluster_dict['name'] cluster_dict['name'] = get_multiple_cluster_name(num_of_clusters, cluster_name, counter + 1) cluster = _cluster_create(cluster_dict, plugin).to_wrapped_dict() clusters.append(cluster) clusters_dict = {'clusters': clusters} return clusters_dict def _cluster_create(values, plugin): ctx = context.ctx() cluster = conductor.cluster_create(ctx, values) context.set_current_cluster_id(cluster.id) sender.status_notify(cluster.id, cluster.name, "New", "create") _add_ports_for_auto_sg(ctx, cluster, plugin) # validating cluster try: plugin.recommend_configs(cluster) cluster = c_u.change_cluster_status( cluster, c_u.CLUSTER_STATUS_VALIDATING) plugin.validate(cluster) quotas.check_cluster(cluster) except Exception as e: with excutils.save_and_reraise_exception(): c_u.change_cluster_status( cluster, c_u.CLUSTER_STATUS_ERROR, six.text_type(e)) api.OPS.provision_cluster(cluster.id) return cluster def get_multiple_cluster_name(num_of_clusters, name, counter): return "%%s-%%0%dd" % len(str(num_of_clusters)) % (name, counter) def _add_ports_for_auto_sg(ctx, cluster, plugin): for ng in cluster.node_groups: if ng.auto_security_group: ports = {'open_ports': plugin.get_open_ports(ng)} conductor.node_group_update(ctx, ng, ports) def terminate_cluster(id, force=False): context.set_current_cluster_id(id) cluster = c_u.change_cluster_status(id, c_u.CLUSTER_STATUS_DELETING) if cluster is None: return api.OPS.terminate_cluster(id, force) sender.status_notify(cluster.id, cluster.name, cluster.status, "delete") def update_cluster(id, values): if "update_keypair" in values: if values["update_keypair"]: api.OPS.update_keypair(id) values.pop("update_keypair") if verification_base.update_verification_required(values): api.OPS.handle_verification(id, values) return conductor.cluster_get(context.ctx(), id) return conductor.cluster_update(context.ctx(), id, values) def construct_ngs_for_scaling(cluster, additional_node_groups): ctx = context.ctx() additional = {} for ng in additional_node_groups: count = ng['count'] ng['count'] = 0 ng_id = conductor.node_group_add(ctx, cluster, ng) additional.update({ng_id: count}) return additional ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/api/v2/data_sources.py0000664000175000017500000000225200000000000022106 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. from sahara import conductor as c from sahara import context conductor = c.API def get_data_sources(**kwargs): return conductor.data_source_get_all(context.ctx(), regex_search=True, **kwargs) def get_data_source(id): return conductor.data_source_get(context.ctx(), id) def delete_data_source(id): conductor.data_source_destroy(context.ctx(), id) def register_data_source(values): return conductor.data_source_create(context.ctx(), values) def data_source_update(id, values): return conductor.data_source_update(context.ctx(), id, values) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/api/v2/images.py0000664000175000017500000000517000000000000020701 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara import conductor as c from sahara.utils.openstack import base as b from sahara.utils.openstack import images as sahara_images conductor = c.API # Image Registry def get_images(name, tags): return b.execute_with_retries( sahara_images.image_manager().list_registered, name, tags) def get_image(**kwargs): if len(kwargs) == 1 and 'id' in kwargs: return b.execute_with_retries( sahara_images.image_manager().get, kwargs['id']) else: return b.execute_with_retries( sahara_images.image_manager().find, **kwargs) def get_registered_image(id): return b.execute_with_retries( sahara_images.image_manager().get_registered_image, id) def register_image(image_id, username, description=None): manager = sahara_images.image_manager() b.execute_with_retries( manager.set_image_info, image_id, username, description) return b.execute_with_retries(manager.get, image_id) def unregister_image(image_id): manager = sahara_images.image_manager() b.execute_with_retries(manager.unset_image_info, image_id) return b.execute_with_retries(manager.get, image_id) def get_image_tags(image_id): return b.execute_with_retries( sahara_images.image_manager().get, image_id).tags def set_image_tags(image_id, tags): manager = sahara_images.image_manager() image_obj = b.execute_with_retries(manager.get, image_id) org_tags = frozenset(image_obj.tags) new_tags = frozenset(tags) to_add = list(new_tags - org_tags) to_remove = list(org_tags - new_tags) if to_add: b.execute_with_retries(manager.tag, image_id, to_add) if to_remove: b.execute_with_retries(manager.untag, image_id, to_remove) return b.execute_with_retries(manager.get, image_id) def remove_image_tags(image_id): manager = sahara_images.image_manager() image_obj = b.execute_with_retries(manager.get, image_id) tags = image_obj.tags b.execute_with_retries(manager.untag, image_id, tags) return b.execute_with_retries(manager.get, image_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/api/v2/job_binaries.py0000664000175000017500000000266500000000000022070 
0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara import conductor as c from sahara import context from sahara.service.edp.job_binaries import manager as jb_manager conductor = c.API def create_job_binary(values): return conductor.job_binary_create(context.ctx(), values) def get_job_binaries(**kwargs): return conductor.job_binary_get_all(context.ctx(), regex_search=True, **kwargs) def get_job_binary(id): return conductor.job_binary_get(context.ctx(), id) def update_job_binary(id, values): return conductor.job_binary_update(context.ctx(), id, values) def delete_job_binary(id): conductor.job_binary_destroy(context.ctx(), id) def get_job_binary_data(id): job_binary = conductor.job_binary_get(context.ctx(), id) return jb_manager.JOB_BINARIES.get_job_binary(job_binary.type). \ get_raw_data(job_binary, with_context=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/api/v2/job_templates.py0000664000175000017500000000237100000000000022264 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara import conductor as c from sahara import context from sahara.service.edp import job_manager as manager conductor = c.API def get_job_templates(**kwargs): return conductor.job_get_all(context.ctx(), regex_search=True, **kwargs) def get_job_template(id): return conductor.job_get(context.ctx(), id) def create_job_template(values): return conductor.job_create(context.ctx(), values) def update_job_template(id, values): return conductor.job_update(context.ctx(), id, values) def delete_job_template(job_id): return conductor.job_destroy(context.ctx(), job_id) def get_job_config_hints(job_type): return manager.get_job_config_hints(job_type) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/api/v2/job_types.py0000664000175000017500000000552300000000000021434 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import six from sahara.plugins import base as plugin_base from sahara.utils import edp def get_job_types(**kwargs): # Return a dictionary of all the job types that can be run # by this instance of Sahara. For each job type, the value # will be a list of plugins that support the job type. For # each plugin, include a dictionary of the versions that # support the job type. # All entries in kwargs are expected to have list values hints = kwargs.get("hints", ["false"])[0].lower() == "true" plugin_names = kwargs.get("plugin", []) all_plugins = plugin_base.PLUGINS.get_plugins() if plugin_names: plugins = filter(lambda x: x.name in plugin_names, all_plugins) else: plugins = all_plugins job_types = kwargs.get("type", edp.JOB_TYPES_ALL) versions = kwargs.get("version", []) res = [] for job_type in job_types: # All job types supported by all versions of the plugin. # This is a dictionary where keys are plugin version # strings and values are lists of job types job_entry = {"name": job_type, "plugins": []} for plugin in plugins: types_for_plugin = plugin.get_edp_job_types(versions) # dict returns a new object so we are not modifying the plugin p = plugin.dict # Find only the versions of this plugin that support the job. # Additionally, instead of a list we want a dictionary of # plugin versions with corresponding config hints p["versions"] = {} for version, supported_types in six.iteritems(types_for_plugin): if job_type in supported_types: if hints: config_hints = plugin.get_edp_config_hints(job_type, version) else: config_hints = {} p["versions"][version] = config_hints # If we found at least one version of the plugin that # supports the job type, add the plugin to the result if p["versions"]: job_entry["plugins"].append(p) if job_entry["plugins"]: res.append(job_entry) return res ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/api/v2/jobs.py0000664000175000017500000000661100000000000020372 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_log import log as logging from sahara import conductor as c from sahara import context from sahara import exceptions as ex from sahara.service import api from sahara.service.edp import job_manager as manager from sahara.utils import edp from sahara.utils import proxy as p conductor = c.API LOG = logging.getLogger(__name__) def execute_job(data): # Elements common to all job types job_template_id = data['job_template_id'] cluster_id = data['cluster_id'] configs = data.get('job_configs', {}) interface = data.get('interface', {}) # Not in Java job types but present for all others input_id = data.get('input_id', None) output_id = data.get('output_id', None) # Since we will use a unified class in the database, we pass # a superset for all job types # example configs['start'] = '2015-05-12T08:55Z' frequency = 5 mins # the job will starts from 2015-05-12T08:55Z, runs every 5 mins job_execution_info = data.get('job_execution_info', {}) configs['job_execution_info'] = job_execution_info job_ex_dict = {'input_id': input_id, 'output_id': output_id, 'job_id': job_template_id, 'cluster_id': cluster_id, 'info': {'status': edp.JOB_STATUS_PENDING}, 'job_configs': configs, 'extra': {}, 'interface': interface} job_execution = conductor.job_execution_create(context.ctx(), job_ex_dict) context.set_current_job_execution_id(job_execution.id) # check to use proxy user if p.job_execution_requires_proxy_user(job_execution): try: p.create_proxy_user_for_job_execution(job_execution) except ex.SaharaException as e: LOG.error("Can't run job execution. " "(Reasons: {reason})".format(reason=e)) conductor.job_execution_destroy(context.ctx(), job_execution) raise e api.OPS.run_edp_job(job_execution.id) return job_execution def job_execution_list(**kwargs): return conductor.job_execution_get_all(context.ctx(), regex_search=True, **kwargs) def get_job_execution(id, refresh_status=False): if refresh_status: return manager.get_job_status(id) else: return conductor.job_execution_get(context.ctx(), id) def update_job_execution(id, values): _update_status(values.pop("info", None), id) return conductor.job_execution_update(context.ctx(), id, values) def _update_status(info, id): if info: status = info.get("status", None) if status == edp.JOB_ACTION_SUSPEND: api.OPS.job_execution_suspend(id) if status == edp.JOB_ACTION_CANCEL: api.OPS.cancel_job_execution(id) def delete_job_execution(id): context.set_current_job_execution_id(id) api.OPS.delete_job_execution(id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/api/v2/node_group_templates.py0000664000175000017500000000261100000000000023650 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
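# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not shipped in the modules above):
# the request payload shape that execute_job() in jobs.py consumes and the
# job-execution record it derives from it, minus the conductor/context calls.
# The angle-bracket values are placeholders and 'PENDING' stands in for
# edp.JOB_STATUS_PENDING.
import pprint


def _build_job_ex_dict(data):
    # Mirrors the field mapping done in execute_job() above.
    configs = data.get('job_configs', {})
    configs['job_execution_info'] = data.get('job_execution_info', {})
    return {'input_id': data.get('input_id'),
            'output_id': data.get('output_id'),
            'job_id': data['job_template_id'],
            'cluster_id': data['cluster_id'],
            'info': {'status': 'PENDING'},
            'job_configs': configs,
            'extra': {},
            'interface': data.get('interface', {})}


if __name__ == '__main__':
    sample = {'job_template_id': '<job-template-uuid>',
              'cluster_id': '<cluster-uuid>',
              'job_configs': {'configs': {}, 'args': []},
              'job_execution_info': {'start': '2015-05-12T08:55Z'}}
    pprint.pprint(_build_job_ex_dict(sample))
# ---------------------------------------------------------------------------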
from sahara import conductor as c from sahara import context conductor = c.API # NodeGroupTemplate ops def get_node_group_templates(**kwargs): return conductor.node_group_template_get_all(context.ctx(), regex_search=True, **kwargs) def get_node_group_template(id): return conductor.node_group_template_get(context.ctx(), id) def create_node_group_template(values): return conductor.node_group_template_create(context.ctx(), values) def terminate_node_group_template(id): return conductor.node_group_template_destroy(context.ctx(), id) def update_node_group_template(id, values): return conductor.node_group_template_update(context.ctx(), id, values) def export_node_group_template(id): return conductor.node_group_template_get(context.ctx(), id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/api/v2/plugins.py0000664000175000017500000000166600000000000021123 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.plugins import base as plugin_base # Plugins ops def get_plugins(): return plugin_base.PLUGINS.get_plugins(serialized=True) def get_plugin(plugin_name, version=None): return plugin_base.PLUGINS.serialize_plugin(plugin_name, version) def update_plugin(plugin_name, values): return plugin_base.PLUGINS.update_plugin(plugin_name, values) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.729891 sahara-16.0.0/sahara/service/castellan/0000775000175000017500000000000000000000000017725 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/castellan/__init__.py0000664000175000017500000000000000000000000022024 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/castellan/config.py0000664000175000017500000000364400000000000021553 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
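# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not shipped in the modules above):
# the node-group-template and plugin ops above are thin pass-throughs that
# forward the current context and caller kwargs to the conductor, always
# enabling regex search for list calls.  'StubConductor' is a stand-in used
# only to make the call pattern visible.
class StubConductor(object):
    def node_group_template_get_all(self, ctx, **kwargs):
        # A real conductor would query the database here.
        return [('called with', ctx, kwargs)]


def list_node_group_templates(conductor, ctx, **kwargs):
    # Same shape as get_node_group_templates() above.
    return conductor.node_group_template_get_all(
        ctx, regex_search=True, **kwargs)


print(list_node_group_templates(StubConductor(), '<context>', name='worker.*'))
# ---------------------------------------------------------------------------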
from castellan import options as castellan from oslo_config import cfg from sahara.utils.openstack import base as utils opts = [ cfg.BoolOpt('use_barbican_key_manager', default=False, help='Enable the usage of the OpenStack Key Management ' 'service provided by barbican.'), ] castellan_opts = [ cfg.StrOpt('barbican_api_endpoint', help='The endpoint to use for connecting to the barbican ' 'api controller. By default, castellan will use the ' 'URL from the service catalog.'), cfg.StrOpt('barbican_api_version', default='v1', help='Version of the barbican API, for example: "v1"'), ] castellan_group = cfg.OptGroup(name='castellan', title='castellan key manager options') CONF = cfg.CONF CONF.register_group(castellan_group) CONF.register_opts(opts) CONF.register_opts(castellan_opts, group=castellan_group) def validate_config(): if CONF.use_barbican_key_manager: # NOTE (elmiko) there is no need to set the api_class as castellan # uses barbican by default. castellan.set_defaults(CONF, auth_endpoint=utils.retrieve_auth_url()) else: castellan.set_defaults(CONF, api_class='sahara.service.castellan.' 'sahara_key_manager.SaharaKeyManager') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/castellan/sahara_key_manager.py0000664000175000017500000000553300000000000024106 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from castellan.common.objects import passphrase as key from castellan.key_manager import key_manager as km """sahara.service.castellan.sahara_key_manager This module contains the KeyManager class that will be used by the castellan library, it is not meant for direct usage within sahara. """ class SaharaKeyManager(km.KeyManager): """Sahara specific key manager This manager is a thin wrapper around the secret being stored. It is intended for backward compatible use only. It will not store keys or generate UUIDs but instead return the secret that is being stored. This behavior allows Sahara to continue storing secrets in its database while using the Castellan key manager abstraction. """ def __init__(self, configuration=None): pass def create_key(self, context, algorithm=None, length=0, expiration=None, **kwargs): """creates a key algorithm, length, and expiration are unused by sahara keys. """ return key.Passphrase(passphrase=kwargs.get('passphrase', '')) def create_key_pair(self, *args, **kwargs): pass def store(self, context, key, expiration=None, **kwargs): """store a key in normal usage a store_key will return the UUID of the key as dictated by the key manager. Sahara would then store this UUID in its database to use for retrieval. As sahara is not actually using a key manager in this context it will return the key's payload for storage. """ return key.get_encoded() def get(self, context, key_id, **kwargs): """get a key since sahara is not actually storing key UUIDs the key_id to this function should actually be the key payload. 
this function will simply return a new SaharaKey based on that value. """ return key.Passphrase(passphrase=key_id) def delete(self, context, key_id, **kwargs): """delete a key as there is no external key manager, this function will not perform any external actions. therefore, it won't change anything. """ pass def list(self, *args, **kwargs): """list all managed keys current implementation of the key manager does not utilize this """ pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/castellan/utils.py0000664000175000017500000000355600000000000021450 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from castellan.common.objects import passphrase from castellan import key_manager from sahara import context def delete_secret(id, ctx=None): """delete a secret from the external key manager :param id: The identifier of the secret to delete :param ctx: The context, and associated authentication, to use with this operation (defaults to the current context) """ if ctx is None: ctx = context.current() key_manager.API().delete(ctx, id) def get_secret(id, ctx=None): """get a secret associated with an id :param id: The identifier of the secret to retrieve :param ctx: The context, and associated authentication, to use with this operation (defaults to the current context) """ if ctx is None: ctx = context.current() key = key_manager.API().get(ctx, id) return key.get_encoded() def store_secret(secret, ctx=None): """store a secret and return its identifier :param secret: The secret to store, this should be a string :param ctx: The context, and associated authentication, to use with this operation (defaults to the current context) """ if ctx is None: ctx = context.current() key = passphrase.Passphrase(secret) return key_manager.API().store(ctx, key) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/coordinator.py0000664000175000017500000001102100000000000020647 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import bisect import hashlib from oslo_config import cfg from oslo_log import log from oslo_utils import uuidutils from tooz import coordination LOG = log.getLogger(__name__) coordinator_opts = [ cfg.IntOpt('coordinator_heartbeat_interval', default=1, help='Interval size between heartbeat execution in seconds. 
' 'Heartbeats are executed to make sure that connection to ' 'the coordination server is active.'), cfg.IntOpt('hash_ring_replicas_count', default=40, help='Number of points that belongs to each member on a hash ' 'ring. The larger number leads to a better distribution.') ] CONF = cfg.CONF CONF.register_opts(coordinator_opts) class Coordinator(object): def __init__(self, backend_url): self.coordinator = None self.member_id = uuidutils.generate_uuid() if backend_url: try: self.coordinator = coordination.get_coordinator( backend_url, self.member_id) self.coordinator.start() LOG.info('Coordination backend loaded successfully.') except coordination.ToozError: LOG.error('Error connecting to coordination backend.') raise def is_started(self): if self.coordinator: return self.coordinator.is_started return False def heartbeat(self): if self.coordinator: self.coordinator.heartbeat() def join_group(self, group_id): if self.coordinator: try: self.coordinator.join_group(group_id).get() except coordination.GroupNotCreated: try: self.coordinator.create_group(group_id).get() except coordination.GroupAlreadyExist: pass self.coordinator.join_group(group_id).get() def get_members(self, group_id): if self.coordinator: for i in range(2): try: members = self.coordinator.get_members(group_id).get() if self.member_id in members: return members self.join_group(group_id) except coordination.GroupNotCreated: self.join_group(group_id) except coordination.ToozError as e: LOG.error("Couldn't get members of {group} group. " "Reason: {ex}".format( group=group_id, ex=str(e))) return [] class HashRing(Coordinator): def __init__(self, backend_url, group_id): self.group_id = group_id self.replicas = CONF.hash_ring_replicas_count super(HashRing, self).__init__(backend_url) self.join_group(group_id) @staticmethod def _hash(key): return int( hashlib.md5(str(key).encode('utf-8')).hexdigest(), 16) # nosec def _build_ring(self): ring = {} members = self.get_members(self.group_id) for member in members: for r in range(self.replicas): hashed_key = self._hash('%s:%s' % (member, r)) ring[hashed_key] = member return ring, sorted(ring.keys()) def _check_object(self, object, ring, sorted_keys): """Checks if this object belongs to this member or not""" hashed_key = self._hash(object.id) position = bisect.bisect(sorted_keys, hashed_key) position = position if position < len(sorted_keys) else 0 return ring[sorted_keys[position]] == self.member_id def get_subset(self, objects): """Returns subset that belongs to this member""" if self.coordinator: ring, keys = self._build_ring() if ring: return [obj for obj in objects if self._check_object( obj, ring, keys)] return [] return objects ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.729891 sahara-16.0.0/sahara/service/edp/0000775000175000017500000000000000000000000016527 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/__init__.py0000664000175000017500000000000000000000000020626 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/base_engine.py0000664000175000017500000000324000000000000021337 0ustar00zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import six from sahara import conductor as c conductor = c.API def optional(fun): fun.__not_implemented__ = True return fun @six.add_metaclass(abc.ABCMeta) class JobEngine(object): @abc.abstractmethod def cancel_job(self, job_execution): pass @abc.abstractmethod def get_job_status(self, job_execution): pass @abc.abstractmethod def run_job(self, job_execution): pass @abc.abstractmethod def run_scheduled_job(self, job_execution): pass @abc.abstractmethod def validate_job_execution(self, cluster, job, data): pass @staticmethod @abc.abstractmethod def get_possible_job_config(job_type): return None @staticmethod @abc.abstractmethod def get_supported_job_types(): return None @optional def suspend_job(self, job_execution): pass def does_engine_implement(self, fun_name): fun = getattr(self, fun_name) if not (fun and callable(fun)): return False return not hasattr(fun, '__not_implemented__') ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.733891 sahara-16.0.0/sahara/service/edp/binary_retrievers/0000775000175000017500000000000000000000000022265 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/binary_retrievers/__init__.py0000664000175000017500000000000000000000000024364 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/binary_retrievers/dispatch.py0000664000175000017500000000444100000000000024441 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara import context from sahara.service.edp.binary_retrievers import internal_swift as i_swift from sahara.service.edp.binary_retrievers import manila_share as manila from sahara.service.edp.binary_retrievers import s3_storage as s3 from sahara.service.edp.binary_retrievers import sahara_db as db from sahara.service.edp import s3_common from sahara.swift import utils as su from sahara.utils.openstack import manila as m def get_raw_binary(job_binary, proxy_configs=None, with_context=False, remote=None): '''Get the raw data for a job binary This will retrieve the raw data for a job binary from it's source. In the case of Swift based binaries there is a precedence of credentials for authenticating the client. Requesting a context based authentication takes precedence over proxy user which takes precedence over embedded credentials. 
:param job_binary: The job binary to retrieve :param proxy_configs: Proxy user configuration to use as credentials :param with_context: Use the current context as credentials :param remote: The remote contains node group and cluster information :returns: The raw data from a job binary ''' url = job_binary.url if url.startswith("internal-db://"): res = db.get_raw_data(context.ctx(), job_binary) if url.startswith(s3_common.S3_JB_PREFIX): res = s3.get_raw_data(job_binary) if url.startswith(su.SWIFT_INTERNAL_PREFIX): if with_context: res = i_swift.get_raw_data_with_context(job_binary) else: res = i_swift.get_raw_data(job_binary, proxy_configs) if url.startswith(m.MANILA_PREFIX): res = manila.get_file_info(job_binary, remote) return res ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/binary_retrievers/internal_swift.py0000664000175000017500000000670700000000000025701 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import functools from oslo_config import cfg import six import swiftclient import sahara.exceptions as ex from sahara.i18n import _ from sahara.service.castellan import utils as key_manager from sahara.swift import utils as su from sahara.utils.openstack import swift as sw CONF = cfg.CONF def _get_names_from_url(url): parse = six.moves.urllib.parse.urlparse(url) return (parse.netloc + parse.path).split('/', 1) def _get_raw_data(job_binary, conn): names = _get_names_from_url(job_binary.url) container, obj = names # if container name has '.sahara' suffix we need to strip it if container.endswith(su.SWIFT_URL_SUFFIX): container = container[:-len(su.SWIFT_URL_SUFFIX)] try: # First check the size headers = conn.head_object(container, obj) total_KB = int(headers.get('content-length', 0)) / 1024.0 if total_KB > CONF.job_binary_max_KB: raise ex.DataTooBigException( round(total_KB, 1), CONF.job_binary_max_KB, _("Size of swift object (%(size)sKB) is greater " "than maximum (%(maximum)sKB)")) headers, body = conn.get_object(container, obj) except swiftclient.ClientException as e: raise ex.SwiftClientException(six.text_type(e)) return body def _validate_job_binary_url(f): @functools.wraps(f) def wrapper(job_binary, *args, **kwargs): if not (job_binary.url.startswith(su.SWIFT_INTERNAL_PREFIX)): # This should have been guaranteed already, # but we'll check just in case. 
raise ex.BadJobBinaryException( _("Url for binary in internal swift must start with %s") % su.SWIFT_INTERNAL_PREFIX) names = _get_names_from_url(job_binary.url) if len(names) == 1: # a container has been requested, this is currently unsupported raise ex.BadJobBinaryException( _('Url for binary in internal swift must specify an object ' 'not a container')) return f(job_binary, *args, **kwargs) return wrapper @_validate_job_binary_url def get_raw_data(job_binary, proxy_configs=None): conn_kwargs = {} if proxy_configs: conn_kwargs.update(username=proxy_configs.get('proxy_username'), password=key_manager.get_secret( proxy_configs.get('proxy_password')), trust_id=proxy_configs.get('proxy_trust_id')) else: conn_kwargs.update(username=job_binary.extra.get('user'), password=key_manager.get_secret( job_binary.extra.get('password'))) conn = sw.client(**conn_kwargs) return _get_raw_data(job_binary, conn) @_validate_job_binary_url def get_raw_data_with_context(job_binary): conn = sw.client_from_token() return _get_raw_data(job_binary, conn) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/binary_retrievers/manila_share.py0000664000175000017500000000231200000000000025260 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.service.edp import job_utils from sahara.service.edp.utils import shares as shares_service def get_file_info(job_binary, remote): shares = [] if remote.instance.node_group.cluster.shares: shares.extend(remote.instance.node_group.cluster.shares) if remote.instance.node_group.shares: shares.extend(remote.instance.node_group.shares) path = shares_service.get_share_path(job_binary.url, shares) if path is None: path = job_utils.mount_share_at_default_path( job_binary.url, remote.instance.node_group.cluster) return {'type': 'path', 'path': path} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/binary_retrievers/s3_storage.py0000664000175000017500000000132300000000000024707 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Massachusetts Open Cloud # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
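# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not shipped in the modules above):
# a standalone replica of how the internal swift retriever splits a job
# binary url into (container, object) and strips the container suffix.
# '.sahara' is assumed to match sahara.swift.utils.SWIFT_URL_SUFFIX.
from urllib.parse import urlparse

SWIFT_URL_SUFFIX = '.sahara'


def split_swift_url(url):
    parse = urlparse(url)
    container, obj = (parse.netloc + parse.path).split('/', 1)
    if container.endswith(SWIFT_URL_SUFFIX):
        container = container[:-len(SWIFT_URL_SUFFIX)]
    return container, obj


print(split_swift_url('swift://samples.sahara/pig/example.pig'))
# -> ('samples', 'pig/example.pig')
# ---------------------------------------------------------------------------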
from sahara.service.edp import s3_common def get_raw_data(job_binary): return s3_common.get_raw_job_binary_data(job_binary) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/binary_retrievers/sahara_db.py0000664000175000017500000000165100000000000024546 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara import conductor as c conductor = c.API def get_raw_data(context, job_binary): # url example: 'internal-db://JobBinaryInternal-UUID' binary_internal_id = job_binary.url[len("internal-db://"):] return conductor.job_binary_internal_get_raw_data(context, binary_internal_id) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.733891 sahara-16.0.0/sahara/service/edp/data_sources/0000775000175000017500000000000000000000000021203 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/data_sources/__init__.py0000664000175000017500000000000000000000000023302 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/data_sources/base.py0000664000175000017500000000716100000000000022474 0ustar00zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
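# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not shipped in the modules above):
# the binary retrievers are chosen purely by url prefix, as dispatch.py does
# above.  The prefix strings below are assumed to match the constants it
# references (s3_common.S3_JB_PREFIX, su.SWIFT_INTERNAL_PREFIX,
# m.MANILA_PREFIX); the handler names are placeholders.
def pick_retriever(url):
    prefixes = [('internal-db://', 'sahara_db'),
                ('s3://', 's3_storage'),
                ('swift://', 'internal_swift'),
                ('manila://', 'manila_share')]
    for prefix, handler in prefixes:
        if url.startswith(prefix):
            return handler
    raise ValueError('no retriever for %s' % url)


print(pick_retriever('internal-db://<job-binary-internal-uuid>'))
print(pick_retriever('swift://container.sahara/object'))
# ---------------------------------------------------------------------------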
import abc import random import re import string import six from sahara.plugins import base as plugins_base @six.add_metaclass(abc.ABCMeta) class DataSourceType(object): @plugins_base.required_with_default def construct_url(self, url, job_exec_id): """Resolve placeholders in the data source url Supported placeholders: * %RANDSTR(len)% - will be replaced with random string of lowercase letters of length `len` * %JOB_EXEC_ID% - will be replaced with the job execution ID :param url: String that represents an url with placeholders :param job_exec_id: Id of the job execution :returns: String that is an url without placeholders """ def _randstr(match): random_len = int(match.group(1)) return ''.join(random.choice(string.ascii_lowercase) for _ in six.moves.range(random_len)) url = url.replace("%JOB_EXEC_ID%", job_exec_id) url = re.sub(r"%RANDSTR\((\d+)\)%", _randstr, url) return url @plugins_base.required_with_default def prepare_cluster(self, data_source, cluster, **kwargs): """Makes a cluster ready to use this data source Different implementations for each data source, for HDFS will be configure the cluster, for Swift verify credentials, and so on :param data_source: The object handle to a data source :param cluster: The object handle to a cluster :returns: None """ pass @plugins_base.required_with_default def get_runtime_url(self, url, cluster): """Get the runtime url of the data source for a cluster It will construct a runtime url if needed, if it's not needed it will use the native url as runtime url :param url: String that represents an already constructed url :param cluster: The object handle to a cluster :returns: String representing the runtime url """ return url @plugins_base.required_with_default def get_urls(self, url, cluster, job_exec_id): """Get the native url and runtime url of a determined data source :param url: String that represents a url (constructed or not) :param cluster: The object handle to a cluster :param job_exec_id: Id of the job execution :returns: A tuple of the form (native_url, runtime_url), where the urls are Strings """ native_url = self.construct_url(url, job_exec_id) runtime_url = self.get_runtime_url(native_url, cluster) return (native_url, runtime_url) @plugins_base.required_with_default def validate(self, data): """Method that validates the data passed through the API This method will be executed during the data source creation and update :raise: If data is invalid, InvalidDataException """ pass @plugins_base.optional def _validate_url(self, url): """Auxiliary method used by the validate method""" pass ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.733891 sahara-16.0.0/sahara/service/edp/data_sources/hdfs/0000775000175000017500000000000000000000000022127 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/data_sources/hdfs/__init__.py0000664000175000017500000000000000000000000024226 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/data_sources/hdfs/implementation.py0000664000175000017500000000301600000000000025526 0ustar00zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import six.moves.urllib.parse as urlparse from sahara import exceptions as ex from sahara.i18n import _ from sahara.service.edp.data_sources.base import DataSourceType from sahara.service.edp import hdfs_helper as h class HDFSType(DataSourceType): def validate(self, data): self._validate_url(data['url']) def _validate_url(self, url): if len(url) == 0: raise ex.InvalidDataException(_("HDFS url must not be empty")) url = urlparse.urlparse(url) if url.scheme: if url.scheme != "hdfs": raise ex.InvalidDataException(_("URL scheme must be 'hdfs'")) if not url.hostname: raise ex.InvalidDataException( _("HDFS url is incorrect, cannot determine a hostname")) def prepare_cluster(self, data_source, cluster, **kwargs): runtime_url = kwargs.pop('runtime_url') h.configure_cluster_for_hdfs(cluster, runtime_url) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/data_sources/manager.py0000664000175000017500000000543700000000000023200 0ustar00zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
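# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not shipped in the modules above):
# a standalone replica of the HDFS data-source url rule above -- a scheme is
# optional, but when present it must be 'hdfs' and carry a hostname.
from urllib.parse import urlparse


def is_valid_hdfs_url(url):
    if not url:
        return False
    parsed = urlparse(url)
    if parsed.scheme and (parsed.scheme != 'hdfs' or not parsed.hostname):
        return False
    return True


print(is_valid_hdfs_url('hdfs://namenode:8020/user/hadoop/output'))  # True
print(is_valid_hdfs_url('/user/hadoop/output'))                      # True
print(is_valid_hdfs_url('swift://container.sahara/object'))          # False
# ---------------------------------------------------------------------------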
from oslo_config import cfg from oslo_log import log as logging import six import six.moves.urllib.parse as urlparse from stevedore import enabled from sahara import conductor as cond from sahara import exceptions as ex from sahara.i18n import _ conductor = cond.API LOG = logging.getLogger(__name__) CONF = cfg.CONF class DataSourceManager(object): def __init__(self): self.data_sources = {} self._load_data_sources() def _load_data_sources(self): config_ds = CONF.data_source_types extension_manager = enabled.EnabledExtensionManager( check_func=lambda ext: ext.name in config_ds, namespace='sahara.data_source.types', invoke_on_load=True ) for ext in extension_manager.extensions: if ext.name in self.data_sources: raise ex.ConfigurationError( _("Data source with name '%s' already exists.") % ext.name) ext.obj.name = ext.name self.data_sources[ext.name] = ext.obj LOG.info("Data source name {ds_name} loaded {entry_point}".format( ds_name=ext.name, entry_point=ext.entry_point_target)) if len(self.data_sources) < len(config_ds): loaded_ds = set(six.iterkeys(self.data_sources)) requested_ds = set(config_ds) raise ex.ConfigurationError( _("Data sources couldn't be loaded: %s") % ", ".join(requested_ds - loaded_ds)) def get_data_sources(self): config_ds = CONF.data_source_types return [self.get_data_source(name).name for name in config_ds] def get_data_source(self, name): res = self.data_sources.get(name) if res is None: raise ex.InvalidDataException(_("Invalid data source")) return res def get_data_source_by_url(self, url): url = urlparse.urlparse(url) if not url.scheme: raise ex.InvalidDataException(_("Data source url must have a" " scheme")) return self.get_data_source(url.scheme) DATA_SOURCES = None def setup_data_sources(): global DATA_SOURCES DATA_SOURCES = DataSourceManager() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.733891 sahara-16.0.0/sahara/service/edp/data_sources/manila/0000775000175000017500000000000000000000000022444 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/data_sources/manila/__init__.py0000664000175000017500000000000000000000000024543 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/data_sources/manila/implementation.py0000664000175000017500000000513700000000000026051 0ustar00zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
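# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not shipped in the modules above):
# get_data_source_by_url() above keys purely on the url scheme.  The dict
# below stands in for what stevedore loads from the
# 'sahara.data_source.types' entry points; it is not the real registry.
from urllib.parse import urlparse

registry = {'hdfs': 'HDFSType', 'swift': 'SwiftType', 's3': 'S3Type',
            'manila': 'ManilaType', 'maprfs': 'MapRFSType'}


def data_source_type_for(url):
    scheme = urlparse(url).scheme
    if not scheme:
        raise ValueError('Data source url must have a scheme')
    if scheme not in registry:
        raise ValueError('Invalid data source')
    return registry[scheme]


print(data_source_type_for('swift://container.sahara/object'))  # SwiftType
# ---------------------------------------------------------------------------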
from oslo_utils import uuidutils import six.moves.urllib.parse as urlparse from sahara import exceptions as ex from sahara.i18n import _ from sahara.service.edp.data_sources.base import DataSourceType from sahara.service.edp import job_utils from sahara.service.edp.utils import shares as shares_service class ManilaType(DataSourceType): def validate(self, data): self._validate_url(data['url']) def _validate_url(self, url): if len(url) == 0: raise ex.InvalidDataException(_("Manila url must not be empty")) url = urlparse.urlparse(url) if url.scheme != "manila": raise ex.InvalidDataException(_("Manila url scheme must be" " 'manila'")) if not uuidutils.is_uuid_like(url.netloc): raise ex.InvalidDataException(_("Manila url netloc must be a" " uuid")) if not url.path: raise ex.InvalidDataException(_("Manila url path must not be" " empty")) def _prepare_cluster(self, url, cluster): path = self._get_share_path(url, cluster.shares or []) if path is None: path = job_utils.mount_share_at_default_path(url, cluster) return path def get_runtime_url(self, url, cluster): # TODO(mariannelm): currently the get_runtime_url method is responsible # for preparing the cluster for the manila job type which is not the # best approach. In order to make a prepare_cluster method for manila # the edp/job_utils.py resolve_data_source_reference function must be # refactored path = self._prepare_cluster(url, cluster) # This gets us the mount point, but we need a file:// scheme to # indicate a local filesystem path return "file://{path}".format(path=path) def _get_share_path(self, url, shares): return shares_service.get_share_path(url, shares) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.733891 sahara-16.0.0/sahara/service/edp/data_sources/maprfs/0000775000175000017500000000000000000000000022473 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/data_sources/maprfs/__init__.py0000664000175000017500000000000000000000000024572 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/data_sources/maprfs/implementation.py0000664000175000017500000000223700000000000026076 0ustar00zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
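# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not shipped in the modules above):
# a standalone approximation of the manila data-source url rule above --
# scheme 'manila', a share id in the netloc, and a non-empty path.  The
# stdlib uuid module stands in for oslo's uuidutils.is_uuid_like().
import uuid
from urllib.parse import urlparse


def is_valid_manila_url(url):
    parsed = urlparse(url)
    if parsed.scheme != 'manila':
        return False
    try:
        uuid.UUID(parsed.netloc)
    except ValueError:
        return False
    return bool(parsed.path)


print(is_valid_manila_url(
    'manila://2f7ee1b9-9a7a-4a3a-b2ad-6ff0ee4b7f41/ex/input.txt'))  # True
print(is_valid_manila_url('manila://not-a-uuid/input.txt'))         # False
# ---------------------------------------------------------------------------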
import six.moves.urllib.parse as urlparse from sahara import exceptions as ex from sahara.i18n import _ from sahara.service.edp.data_sources.base import DataSourceType class MapRFSType(DataSourceType): def validate(self, data): self._validate_url(data['url']) def _validate_url(self, url): if len(url) == 0: raise ex.InvalidDataException(_("MapR FS url must not be empty")) url = urlparse.urlparse(url) if url.scheme: if url.scheme != "maprfs": raise ex.InvalidDataException(_("URL scheme must be 'maprfs'")) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/data_sources/opts.py0000664000175000017500000000173700000000000022552 0ustar00zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # File contains data sources opts to avoid cyclic imports issue from oslo_config import cfg opts = [ cfg.ListOpt('data_source_types', default=['swift', 'hdfs', 'maprfs', 'manila', 's3'], help='List of data sources types to be loaded. Sahara ' 'preserves the order of the list when returning it.'), ] CONF = cfg.CONF CONF.register_opts(opts) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.733891 sahara-16.0.0/sahara/service/edp/data_sources/s3/0000775000175000017500000000000000000000000021530 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/data_sources/s3/__init__.py0000664000175000017500000000000000000000000023627 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/data_sources/s3/implementation.py0000664000175000017500000000630500000000000025133 0ustar00zuulzuul00000000000000# Copyright (c) 2018 OpenStack Contributors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
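# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not shipped in the modules above):
# how the 'data_source_types' ListOpt defined in opts.py above surfaces to
# operators.  It can be overridden in sahara.conf, for example:
#     [DEFAULT]
#     data_source_types = swift,hdfs,s3
# The snippet registers the same option on a throwaway ConfigOpts object and
# reads the default back; it assumes oslo.config is installed (it is already
# a Sahara requirement).
from oslo_config import cfg

demo_conf = cfg.ConfigOpts()
demo_conf.register_opts([
    cfg.ListOpt('data_source_types',
                default=['swift', 'hdfs', 'maprfs', 'manila', 's3'])])
demo_conf([])  # parse an empty command line so the defaults apply
print(demo_conf.data_source_types)
# ---------------------------------------------------------------------------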
import six.moves.urllib.parse as urlparse from sahara import exceptions as ex from sahara.i18n import _ from sahara.service.edp.data_sources.base import DataSourceType from sahara.service.edp import s3_common from sahara.utils import types class S3Type(DataSourceType): configs_map = {"accesskey": s3_common.S3_ACCESS_KEY_CONFIG, "secretkey": s3_common.S3_SECRET_KEY_CONFIG, "endpoint": s3_common.S3_ENDPOINT_CONFIG, "bucket_in_path": s3_common.S3_BUCKET_IN_PATH_CONFIG, "ssl": s3_common.S3_SSL_CONFIG} bool_keys = ["bucket_in_path", "ssl"] def validate(self, data): self._validate_url(data['url']) # Do validation loosely, and don't require much... the user might have # (by their own preference) set some or all configs manually if "credentials" not in data: return for key in data["credentials"].keys(): if key not in self.configs_map.keys(): raise ex.InvalidDataException( _("Unknown config '%s' for S3 data source") % key) if key in self.bool_keys: if not isinstance(data["credentials"][key], bool): raise ex.InvalidDataException( _("Config '%s' must be boolean") % key) def _validate_url(self, url): if len(url) == 0: raise ex.InvalidDataException(_("S3 url must not be empty")) url = urlparse.urlparse(url) if url.scheme not in ["s3", "s3a"]: raise ex.InvalidDataException( _("URL scheme must be 's3' or 's3a'")) if not url.hostname: raise ex.InvalidDataException(_("Bucket name must be present")) if not url.path: raise ex.InvalidDataException(_("Object name must be present")) def prepare_cluster(self, data_source, cluster, **kwargs): if hasattr(data_source, "credentials"): job_configs = kwargs.pop('job_configs') if isinstance(job_configs, types.FrozenDict): return if job_configs.get('configs', None) is None: return creds = data_source.credentials job_conf = job_configs['configs'] for config_name, s3a_cfg_name in self.configs_map.items(): if job_conf.get(s3a_cfg_name, None) is None: # no overwrite if creds.get(config_name, None) is not None: job_conf[s3a_cfg_name] = creds[config_name] def get_runtime_url(self, url, cluster): return url.replace("s3://", "s3a://", 1) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.733891 sahara-16.0.0/sahara/service/edp/data_sources/swift/0000775000175000017500000000000000000000000022337 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/data_sources/swift/__init__.py0000664000175000017500000000000000000000000024436 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/data_sources/swift/implementation.py0000664000175000017500000000705200000000000025742 0ustar00zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
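# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not shipped in the modules above):
# two behaviours of S3Type shown above, standalone -- the s3:// -> s3a://
# runtime rewrite and the "fill but never overwrite" merge of data-source
# credentials into job configs.  The Hadoop key names below are placeholders
# standing in for the s3_common constants.
def to_runtime_url(url):
    return url.replace('s3://', 's3a://', 1)


def merge_credentials(job_conf, creds, configs_map):
    for cred_key, hadoop_key in configs_map.items():
        if job_conf.get(hadoop_key) is None and creds.get(cred_key) is not None:
            job_conf[hadoop_key] = creds[cred_key]
    return job_conf


demo_map = {'accesskey': 'fs.s3a.access.key',
            'secretkey': 'fs.s3a.secret.key'}
print(to_runtime_url('s3://bucket/path/to/object'))  # s3a://bucket/path/to/object
print(merge_credentials({'fs.s3a.access.key': 'already-set'},
                        {'accesskey': '<access-key>', 'secretkey': '<secret>'},
                        demo_map))
# ---------------------------------------------------------------------------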
from oslo_config import cfg import six.moves.urllib.parse as urlparse from sahara import exceptions as ex from sahara.i18n import _ from sahara.service.edp.data_sources.base import DataSourceType from sahara.swift import swift_helper as sw from sahara.swift import utils as su from sahara.utils.types import FrozenDict CONF = cfg.CONF class SwiftType(DataSourceType): def validate(self, data): self._validate_url(data['url']) if not CONF.use_domain_for_proxy_users and "credentials" not in data: raise ex.InvalidCredentials(_("No credentials provided for Swift")) if not CONF.use_domain_for_proxy_users and ( "user" not in data["credentials"]): raise ex.InvalidCredentials( _("User is not provided in credentials for Swift")) if not CONF.use_domain_for_proxy_users and ( "password" not in data["credentials"]): raise ex.InvalidCredentials( _("Password is not provided in credentials for Swift")) def _validate_url(self, url): if len(url) == 0: raise ex.InvalidDataException(_("Swift url must not be empty")) url = urlparse.urlparse(url) if url.scheme != "swift": raise ex.InvalidDataException(_("URL scheme must be 'swift'")) # The swift url suffix does not have to be included in the netloc. # However, if the swift suffix indicator is part of the netloc then # we require the right suffix. # Additionally, the path must be more than '/' if (su.SWIFT_URL_SUFFIX_START in url.netloc and not url.netloc.endswith(su.SWIFT_URL_SUFFIX)) or len(url.path) <= 1: raise ex.InvalidDataException( _("URL must be of the form swift://container%s/object") % su.SWIFT_URL_SUFFIX) def prepare_cluster(self, data_source, cluster, **kwargs): if hasattr(data_source, "credentials"): job_configs = kwargs.pop('job_configs') # if no data source was passed as a reference for the job, the # job_configs will not be changed (so it will be a FronzenDict) # and we don't need to change it as well if isinstance(job_configs, FrozenDict) or \ job_configs.get('configs', None) is None: return if not job_configs.get('proxy_configs'): username = data_source.credentials['user'] password = data_source.credentials['password'] # Don't overwrite if there is already a value here if job_configs['configs'].get(sw.HADOOP_SWIFT_USERNAME, None) \ is None and (username is not None): job_configs['configs'][sw.HADOOP_SWIFT_USERNAME] = username if job_configs['configs'].get(sw.HADOOP_SWIFT_PASSWORD, None) \ is None and (password is not None): job_configs['configs'][sw.HADOOP_SWIFT_PASSWORD] = password ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/hdfs_helper.py0000664000175000017500000001075000000000000021367 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
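# ---------------------------------------------------------------------------
# Illustrative sketch (editorial addition, not shipped in the modules above):
# a standalone replica of the swift data-source url rule above -- scheme
# 'swift', a container whose suffix (if any) must be '.sahara', and an object
# path longer than '/'.  '.sahara' is assumed to match the
# sahara.swift.utils suffix constants.
from urllib.parse import urlparse

SWIFT_URL_SUFFIX = '.sahara'


def is_valid_swift_url(url):
    parsed = urlparse(url)
    if parsed.scheme != 'swift':
        return False
    if '.' in parsed.netloc and not parsed.netloc.endswith(SWIFT_URL_SUFFIX):
        return False
    return len(parsed.path) > 1


print(is_valid_swift_url('swift://container.sahara/object'))  # True
print(is_valid_swift_url('swift://container.wrong/object'))   # False
print(is_valid_swift_url('swift://container.sahara/'))        # False
# ---------------------------------------------------------------------------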
from oslo_utils import uuidutils import six from six.moves.urllib import parse as urlparse from sahara import conductor as c from sahara import context from sahara.plugins import exceptions as ex from sahara.plugins import utils as u from sahara.utils import cluster as cluster_utils conductor = c.API HBASE_COMMON_LIB_PATH = "/user/sahara-hbase-lib" def create_hbase_common_lib(r): r.execute_command( 'sudo su - -c "hdfs dfs -mkdir -p %s" hdfs' % ( HBASE_COMMON_LIB_PATH)) ret_code, stdout = r.execute_command( 'hbase classpath') if ret_code == 0: paths = stdout.split(':') for p in paths: if p.endswith(".jar"): r.execute_command('sudo su - -c "hdfs dfs -put -p %s %s" hdfs' % (p, HBASE_COMMON_LIB_PATH)) else: raise ex.RequiredServiceMissingException('hbase') def put_file_to_hdfs(r, file, file_name, path, hdfs_user): tmp_file_name = '%s.%s' % (file_name, six.text_type( uuidutils.generate_uuid())) r.write_file_to('/tmp/%s' % tmp_file_name, file) move_from_local(r, '/tmp/%s' % tmp_file_name, path + '/' + file_name, hdfs_user) def copy_from_local(r, source, target, hdfs_user): r.execute_command('sudo su - -c "hdfs dfs -copyFromLocal ' '%s %s" %s' % (source, target, hdfs_user)) def move_from_local(r, source, target, hdfs_user): # using copyFromLocal followed by rm to address permission issues that # arise when image user is not the same as hdfs user (permissions-wise). r.execute_command('sudo su - -c "hdfs dfs -copyFromLocal %(source)s ' '%(target)s" %(user)s && sudo rm -f %(source)s' % {"source": source, "target": target, "user": hdfs_user}) def create_dir_hadoop1(r, dir_name, hdfs_user): r.execute_command( 'sudo su - -c "hdfs dfs -mkdir %s" %s' % (dir_name, hdfs_user)) def create_dir_hadoop2(r, dir_name, hdfs_user): r.execute_command( 'sudo su - -c "hdfs dfs -mkdir -p %s" %s' % (dir_name, hdfs_user)) def _get_cluster_hosts_information(host, cluster): for clust in conductor.cluster_get_all(context.ctx()): if clust.id == cluster.id: continue for i in u.get_instances(clust): if i.instance_name == host: return cluster_utils.generate_etc_hosts(clust) return None def _is_cluster_configured(cluster, host_info): inst = u.get_instances(cluster)[0] cat_etc_hosts = 'cat /etc/hosts' with inst.remote() as r: exit_code, etc_hosts = r.execute_command(cat_etc_hosts) return all(host in etc_hosts for host in host_info) def configure_cluster_for_hdfs(cluster, data_source_url): host = urlparse.urlparse(data_source_url).hostname etc_hosts_information = _get_cluster_hosts_information(host, cluster) if etc_hosts_information is None: # Ip address hasn't been resolved, the last chance is for VM itself return # If the cluster was already configured for this data source # there's no need to configure it again if _is_cluster_configured(cluster, etc_hosts_information.splitlines()): return etc_hosts_update = ('/tmp/etc-hosts-update' '.%s' % six.text_type(uuidutils.generate_uuid())) tmp_etc_hosts = ('/tmp/etc-hosts' '.%s' % six.text_type(uuidutils.generate_uuid())) update_etc_hosts_cmd = ( 'cat %(etc_hosts_update)s /etc/hosts | ' 'sort | uniq > %(tmp_etc_hosts)s && ' 'cat %(tmp_etc_hosts)s > /etc/hosts && ' 'rm -f %(tmp_etc_hosts)s %(etc_hosts_update)s' % {'etc_hosts_update': etc_hosts_update, 'tmp_etc_hosts': tmp_etc_hosts}) for inst in u.get_instances(cluster): with inst.remote() as r: r.write_file_to(etc_hosts_update, etc_hosts_information) r.execute_command(update_etc_hosts_cmd, run_as_root=True) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.737891 
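# Quick illustration (not part of the original module) of the shell command
# that move_from_local() above builds; the paths and the HDFS user are made up.
def _example_move_from_local_command():
    source, target, user = '/tmp/workflow.xml.1234', '/user/hadoop/wf/workflow.xml', 'hadoop'
    cmd = ('sudo su - -c "hdfs dfs -copyFromLocal %(source)s '
           '%(target)s" %(user)s && sudo rm -f %(source)s'
           % {"source": source, "target": target, "user": user})
    return cmd
    # -> sudo su - -c "hdfs dfs -copyFromLocal /tmp/workflow.xml.1234
    #    /user/hadoop/wf/workflow.xml" hadoop && sudo rm -f /tmp/workflow.xml.1234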
sahara-16.0.0/sahara/service/edp/job_binaries/0000775000175000017500000000000000000000000021155 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/job_binaries/__init__.py0000664000175000017500000000000000000000000023254 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/job_binaries/base.py0000664000175000017500000000650200000000000022444 0ustar00zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import six from sahara import exceptions as ex from sahara.plugins import base as plugins_base @six.add_metaclass(abc.ABCMeta) class JobBinaryType(object): @plugins_base.required_with_default def prepare_cluster(self, job_binary, **kwargs): """Makes a cluster ready to use this job binary Different implementations for each job binary type for Manila it will be mount the share, for Swift verify credentials, and so on :param job_binary: The object handle to a job binary :returns: None """ pass @plugins_base.required def copy_binary_to_cluster(self, job_binary, **kwargs): """Get the path for the binary in a cluster If necessary, pull binary data from the binary store, and copy that data to a useful path on the cluster. 
Then returns a valid FS path for the job binary in the cluster :param job_binary: The object handle to a job binary :returns: String representing the local path """ # TODO(mariannelm): currently for the job binaries it's true # that the raw data must be available at a FS path in the cluster, but # for most of the job binary types there's no need to keep this data # in the cluster after the job is done, so it would be a good thing to # have a method responsible for removing the job binary raw data # after the end of the job return None @plugins_base.required_with_default def validate(self, data, **kwargs): """Method that validate the data passed through the API This method will be executed during the job binary creation and update :raise: If data is invalid, InvalidDataException """ pass @plugins_base.optional def _validate_url(self, url): """Auxiliary method used by the validate method""" pass @plugins_base.required_with_default def validate_job_location_format(self, entry): """Checks whether or not the API entry is valid :param entry: String that represents a job binary url :returns: True if this entry is valid, False otherwhise """ return True @plugins_base.required_with_default def get_raw_data(self, job_binary, **kwargs): """Get the raw binary Used only by the API, if the type doesn't support this operation it should raise NotImplementedException :param job_binary: The object handle to a job binary :returns: Raw binary """ raise ex.NotImplementedException() @plugins_base.optional def _generate_valid_path(self, job_binary): """Generates a valid FS path for the binary be placed""" return '/tmp/' + job_binary.name ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.737891 sahara-16.0.0/sahara/service/edp/job_binaries/internal_db/0000775000175000017500000000000000000000000023436 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/job_binaries/internal_db/__init__.py0000664000175000017500000000000000000000000025535 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/job_binaries/internal_db/implementation.py0000664000175000017500000000476300000000000027047 0ustar00zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
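# Hypothetical minimal JobBinaryType subclass (illustration only), sketching the
# methods a new job binary backend typically fills in. The 'example://' scheme
# and payload are made up; the concrete backends later in this section
# (internal-db, manila, s3, swift) follow the same pattern.
class ExampleJobBinaryType(JobBinaryType):
    def copy_binary_to_cluster(self, job_binary, **kwargs):
        remote = kwargs.pop('remote')
        dst = self._generate_valid_path(job_binary)
        remote.write_file_to(dst, self.get_raw_data(job_binary, **kwargs))
        return dst

    def get_raw_data(self, job_binary, **kwargs):
        # a real backend would fetch the bytes from its store here
        return b'example payload'

    def validate(self, data, **kwargs):
        self._validate_url(data['url'])

    def _validate_url(self, url):
        if not url.startswith('example://'):  # made-up scheme for the sketch
            raise ex.InvalidDataException("URL scheme must be 'example'")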
from oslo_config import cfg from oslo_utils import uuidutils import six.moves.urllib.parse as urlparse from sahara import conductor as c from sahara import exceptions as ex from sahara.i18n import _ from sahara.service.edp.job_binaries.base import JobBinaryType import sahara.service.validations.edp.base as b CONF = cfg.CONF conductor = c.API class InternalDBType(JobBinaryType): def copy_binary_to_cluster(self, job_binary, **kwargs): # url example: 'internal-db://JobBinaryInternal-UUID' r = kwargs.pop('remote') dst = self._generate_valid_path(job_binary) raw = self.get_raw_data(job_binary, **kwargs) r.write_file_to(dst, raw) return dst def get_raw_data(self, job_binary, **kwargs): context = kwargs.pop('context') # url example: 'internal-db://JobBinaryInternal-UUID' binary_internal_id = job_binary.url[len("internal-db://"):] return conductor.job_binary_internal_get_raw_data(context, binary_internal_id) def validate_job_location_format(self, url): try: self._validate_url(url) except ex.InvalidDataException: return False return True def validate(self, data, **kwargs): self._validate_url(data['url']) internal_uid = data['url'].replace("internal-db://", '') b.check_job_binary_internal_exists(internal_uid) def _validate_url(self, url): if len(url) == 0: raise ex.InvalidDataException( _("Internal data base url must not be empty")) url = urlparse.urlparse(url) if url.scheme != "internal-db": raise ex.InvalidDataException( _("URL scheme must be 'internal-db'")) if not uuidutils.is_uuid_like(url.netloc): raise ex.InvalidDataException( _("Internal data base url netloc must be a uuid")) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/job_binaries/manager.py0000664000175000017500000000536400000000000023151 0ustar00zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
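# Illustrative check (not part of the original module) of the URL format the
# InternalDBType validator above accepts: 'internal-db://<uuid of a
# JobBinaryInternal row>'.
def _example_internal_db_url():
    idb = InternalDBType()
    idb._validate_url('internal-db://%s' % uuidutils.generate_uuid())  # passes
    # Anything with a different scheme, or a netloc that is not uuid-like
    # (e.g. 'internal-db://not-a-uuid'), raises ex.InvalidDataException.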
from oslo_config import cfg from oslo_log import log as logging import six import six.moves.urllib.parse as urlparse from stevedore import enabled from sahara import conductor as cond from sahara import exceptions as ex from sahara.i18n import _ conductor = cond.API LOG = logging.getLogger(__name__) CONF = cfg.CONF class JobBinaryManager(object): def __init__(self): self.job_binaries = {} self._load_job_binaries() def _load_job_binaries(self): config_jb = CONF.job_binary_types extension_manager = enabled.EnabledExtensionManager( check_func=lambda ext: ext.name in config_jb, namespace='sahara.job_binary.types', invoke_on_load=True ) for ext in extension_manager.extensions: if ext.name in self.job_binaries: raise ex.ConfigurationError( _("Job binary with name '%s' already exists.") % ext.name) ext.obj.name = ext.name self.job_binaries[ext.name] = ext.obj LOG.info("Job binary name {jb_name} loaded {entry_point}".format( jb_name=ext.name, entry_point=ext.entry_point_target)) if len(self.job_binaries) < len(config_jb): loaded_jb = set(six.iterkeys(self.job_binaries)) requested_jb = set(config_jb) raise ex.ConfigurationError( _("Job binaries couldn't be loaded: %s") % ", ".join(requested_jb - loaded_jb)) def get_job_binaries(self): config_jb = CONF.job_binary_types return [self.get_job_binary(name).name for name in config_jb] def get_job_binary(self, name): res = self.job_binaries.get(name) if res is None: raise ex.InvalidDataException(_("Invalid job binary")) return res def get_job_binary_by_url(self, url): url = urlparse.urlparse(url) if not url.scheme: raise ex.InvalidDataException( _("Job binary url must have a scheme")) return self.get_job_binary(url.scheme) JOB_BINARIES = None def setup_job_binaries(): global JOB_BINARIES JOB_BINARIES = JobBinaryManager() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.737891 sahara-16.0.0/sahara/service/edp/job_binaries/manila/0000775000175000017500000000000000000000000022416 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/job_binaries/manila/__init__.py0000664000175000017500000000000000000000000024515 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/job_binaries/manila/implementation.py0000664000175000017500000000663300000000000026025 0ustar00zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
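# Usage sketch (illustration only): in a deployment where the 'swift' job binary
# type is enabled in CONF.job_binary_types, the manager above resolves a backend
# from the URL scheme. The URL below is a made-up example.
def _example_lookup_by_url():
    if JOB_BINARIES is None:
        setup_job_binaries()          # normally done once at service start-up
    backend = JOB_BINARIES.get_job_binary_by_url('swift://container.sahara/object.jar')
    return backend.name               # -> 'swift'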
from oslo_utils import uuidutils import six.moves.urllib.parse as urlparse from sahara import conductor as c from sahara import exceptions as ex from sahara.i18n import _ from sahara.service.edp.job_binaries.base import JobBinaryType from sahara.service.edp import job_utils from sahara.service.edp.utils import shares as shares_service from sahara.utils.openstack import manila as m conductor = c.API class ManilaType(JobBinaryType): def copy_binary_to_cluster(self, job_binary, **kwargs): remote = kwargs.pop('remote') path = self._get_share_path(job_binary, remote) # if path is None, then it was mounted in the default path # by the prepare_cluster method so just construct the # default path and return it if path is None: url = urlparse.urlparse(job_binary.url) share_id = url.netloc mount_point = shares_service.default_mount(share_id) path = shares_service.make_share_path(mount_point, url.path) return path def prepare_cluster(self, job_binary, **kwargs): remote = kwargs.pop('remote') path = self._get_share_path(job_binary, remote) if path is None: path = job_utils.mount_share_at_default_path( job_binary.url, remote.instance.node_group.cluster) def _get_share_path(self, job_binary, remote): shares = [] if remote.instance.node_group.cluster.shares: shares.extend(remote.instance.node_group.cluster.shares) if remote.instance.node_group.shares: shares.extend(remote.instance.node_group.shares) path = shares_service.get_share_path(job_binary.url, shares) return path def validate_job_location_format(self, url): if url.startswith(m.MANILA_PREFIX): url = urlparse.urlparse(url) return (uuidutils.is_uuid_like(url.netloc) and url.path) else: return False def validate(self, data, **kwargs): self._validate_url(data['url']) def _validate_url(self, url): if len(url) == 0: raise ex.InvalidDataException(_("Manila url must not be empty")) url = urlparse.urlparse(url) if url.scheme != "manila": raise ex.InvalidDataException(_("Manila url scheme must be" " 'manila'")) if not uuidutils.is_uuid_like(url.netloc): raise ex.InvalidDataException(_("Manila url netloc must be a" " uuid")) if not url.path: raise ex.InvalidDataException(_("Manila url path must not be" " empty")) def get_raw_data(self, job_binary, **kwargs): raise ex.NotImplementedException('Manila does not implement this ' 'method') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/job_binaries/opts.py0000664000175000017500000000173100000000000022516 0ustar00zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # File contains job binaries opts to avoid cyclic imports issue from oslo_config import cfg opts = [ cfg.ListOpt('job_binary_types', default=['swift', 'manila', 'internal-db', 's3'], help='List of job binary types to be loaded. 
Sahara ' 'preserves the order of the list when returning it.'), ] CONF = cfg.CONF CONF.register_opts(opts) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.737891 sahara-16.0.0/sahara/service/edp/job_binaries/s3/0000775000175000017500000000000000000000000021502 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/job_binaries/s3/__init__.py0000664000175000017500000000000000000000000023601 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/job_binaries/s3/implementation.py0000664000175000017500000000345600000000000025111 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Massachusetts Open Cloud # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import six import six.moves.urllib.parse as urlparse import sahara.exceptions as ex from sahara.i18n import _ from sahara.service.edp.job_binaries.base import JobBinaryType from sahara.service.edp import s3_common class S3Type(JobBinaryType): def copy_binary_to_cluster(self, job_binary, **kwargs): r = kwargs.pop('remote') dst = self._generate_valid_path(job_binary) raw = self.get_raw_data(job_binary) r.write_file_to(dst, raw) return dst def validate_job_location_format(self, url): url = urlparse.urlparse(url) return url.scheme == "s3" and url.hostname def validate(self, data, **kwargs): # We only check on create, not update if not kwargs.get('job_binary_id', None): s3_common._validate_job_binary_url(data['url']) extra = data.get("extra", {}) if (six.viewkeys(extra) != {"accesskey", "secretkey", "endpoint"}): raise ex.InvalidDataException( _("Configs 'accesskey', 'secretkey', and 'endpoint'" " must be provided.")) def get_raw_data(self, job_binary, **kwargs): return s3_common.get_raw_job_binary_data(job_binary) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.737891 sahara-16.0.0/sahara/service/edp/job_binaries/swift/0000775000175000017500000000000000000000000022311 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/job_binaries/swift/__init__.py0000664000175000017500000000000000000000000024410 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/job_binaries/swift/implementation.py0000664000175000017500000001111300000000000025705 0ustar00zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
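# Illustrative 'extra' payload for the S3 job binary validator above (sample
# values are made up); on create, validate() expects exactly the three keys shown.
_example_s3_job_binary = {
    'url': 's3://example-bucket/path/to/binary.jar',
    'extra': {
        'accesskey': 'EXAMPLE-ACCESS-KEY',
        'secretkey': 'EXAMPLE-SECRET-KEY',
        'endpoint': 'https://object-store.example.com',
    },
}
# S3Type().validate(_example_s3_job_binary) is expected to pass; a missing or
# additional key in 'extra' raises InvalidDataException.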
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg import six import six.moves.urllib.parse as urlparse import swiftclient import sahara.exceptions as ex from sahara.i18n import _ from sahara.service.castellan import utils as key_manager from sahara.service.edp.job_binaries.base import JobBinaryType from sahara.swift import utils as su from sahara.utils.openstack import swift as sw CONF = cfg.CONF class SwiftType(JobBinaryType): def copy_binary_to_cluster(self, job_binary, **kwargs): r = kwargs.pop('remote') dst = self._generate_valid_path(job_binary) raw = self.get_raw_data(job_binary, **kwargs) r.write_file_to(dst, raw) return dst def validate_job_location_format(self, url): url = urlparse.urlparse(url) return url.scheme == "swift" and url.hostname def validate(self, data, **kwargs): if not kwargs.get('job_binary_id', None): extra = data.get("extra", {}) # Should not be checked during job binary update if (not extra.get("user") or not extra.get("password")) and ( not CONF.use_domain_for_proxy_users): raise ex.BadJobBinaryException() def get_raw_data(self, job_binary, **kwargs): self._validate_job_binary_url(job_binary) proxy_configs = kwargs.pop('proxy_configs', None) with_context = kwargs.pop('with_context', False) if not with_context: conn_kwargs = {} if proxy_configs: conn_kwargs.update(username=proxy_configs.get( 'proxy_username'), password=key_manager.get_secret( proxy_configs.get('proxy_password')), trust_id=proxy_configs.get( 'proxy_trust_id')) else: conn_kwargs.update(username=job_binary.extra.get('user'), password=key_manager.get_secret( job_binary.extra.get('password'))) conn = sw.client(**conn_kwargs) else: conn = sw.client_from_token() raw = self._get_raw_data(job_binary, conn) return raw def _get_names_from_url(self, url): parse = six.moves.urllib.parse.urlparse(url) return (parse.netloc + parse.path).split('/', 1) def _get_raw_data(self, job_binary, conn): names = self._get_names_from_url(job_binary.url) container, obj = names # if container name has '.sahara' suffix we need to strip it if container.endswith(su.SWIFT_URL_SUFFIX): container = container[:-len(su.SWIFT_URL_SUFFIX)] try: # First check the size headers = conn.head_object(container, obj) total_KB = int(headers.get('content-length', 0)) / 1024.0 if total_KB > CONF.job_binary_max_KB: raise ex.DataTooBigException( round(total_KB, 1), CONF.job_binary_max_KB, _("Size of swift object (%(size)sKB) is greater " "than maximum (%(maximum)sKB)")) headers, body = conn.get_object(container, obj) except swiftclient.ClientException as e: raise ex.SwiftClientException(six.text_type(e)) return body def _validate_job_binary_url(self, job_binary): if not (job_binary.url.startswith(su.SWIFT_INTERNAL_PREFIX)): # This should have been guaranteed already, # but we'll check just in case. 
raise ex.BadJobBinaryException( _("Url for binary in internal swift must start with %s") % su.SWIFT_INTERNAL_PREFIX) names = self._get_names_from_url(job_binary.url) if len(names) == 1: # a container has been requested, this is currently unsupported raise ex.BadJobBinaryException( _('Url for binary in internal swift must specify an ' 'object not a container')) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/job_manager.py0000664000175000017500000002407400000000000021354 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from sahara import conductor as c from sahara import context from sahara import exceptions as e from sahara.i18n import _ from sahara.service.edp import job_utils from sahara.service.edp.oozie import engine as oozie_engine from sahara.service.edp.spark import engine as spark_engine from sahara.service.edp.storm import engine as storm_engine from sahara.utils import cluster as c_u from sahara.utils import edp from sahara.utils import proxy as p LOG = log.getLogger(__name__) CONF = cfg.CONF conductor = c.API ENGINES = [oozie_engine.OozieJobEngine, spark_engine.SparkJobEngine, storm_engine.StormJobEngine, storm_engine.StormPyleusJobEngine] def _get_job_type(job_execution): return conductor.job_get(context.ctx(), job_execution.job_id).type def get_job_engine(cluster, job_execution): return job_utils.get_plugin(cluster).get_edp_engine(cluster, _get_job_type( job_execution)) def _write_job_status(job_execution, job_info): update = {"info": job_info} if job_info['status'] in edp.JOB_STATUSES_TERMINATED: update['end_time'] = datetime.datetime.now() job_configs = p.delete_proxy_user_for_job_execution(job_execution) if job_configs: update['job_configs'] = job_configs return conductor.job_execution_update(context.ctx(), job_execution, update) def _update_job_status(engine, job_execution): job_info = engine.get_job_status(job_execution) if job_info is not None: job_execution = _write_job_status(job_execution, job_info) return job_execution def _update_job_execution_extra(cluster, job_execution): # tmckay-fp we can make this slightly more efficient in # the use_namespaces case by asking the engine if it knows # the submission machine, and checking if that machine has # a floating ip. 
if (CONF.use_namespaces or CONF.proxy_command): info = cluster.node_groups[0].instances[0].remote().get_neutron_info() extra = job_execution.extra.copy() extra['neutron'] = info job_execution = conductor.job_execution_update( context.ctx(), job_execution.id, {'extra': extra}) return job_execution def _run_job(job_execution_id): ctx = context.ctx() job_execution = conductor.job_execution_get(ctx, job_execution_id) cluster = conductor.cluster_get(ctx, job_execution.cluster_id) if cluster is None or cluster.status != c_u.CLUSTER_STATUS_ACTIVE: LOG.info("Cannot run this job on a non-existent cluster or an " "inactive cluster.") return eng = get_job_engine(cluster, job_execution) if eng is None: raise e.EDPError(_("Cluster does not support job type %s") % _get_job_type(job_execution)) job_execution = _update_job_execution_extra(cluster, job_execution) # Job id is a string # Status is a string # Extra is a dictionary to add to extra in the job_execution if job_execution.job_configs.job_execution_info.get('job_execution_type' ) == 'scheduled': jid, status, extra = eng.run_scheduled_job(job_execution) else: jid, status, extra = eng.run_job(job_execution) # Set the job id and the start time # Optionally, update the status and the 'extra' field update_dict = {'engine_job_id': jid, 'start_time': datetime.datetime.now()} if status: update_dict['info'] = {'status': status} if extra: curr_extra = job_execution.extra.copy() if 'neutron' in curr_extra: curr_extra['neutron'] = curr_extra['neutron'].copy() curr_extra.update(extra) update_dict['extra'] = curr_extra job_execution = conductor.job_execution_update( ctx, job_execution, update_dict) def run_job(job_execution_id): try: _run_job(job_execution_id) except Exception as ex: LOG.exception("Can't run job execution (reason: {reason})".format( reason=ex)) job_execution = conductor.job_execution_get( context.ctx(), job_execution_id) if job_execution.engine_job_id is not None: cancel_job(job_execution_id) conductor.job_execution_update( context.ctx(), job_execution_id, {'info': {'status': edp.JOB_STATUS_FAILED}, 'start_time': datetime.datetime.now(), 'end_time': datetime.datetime.now()}) def cancel_job(job_execution_id): ctx = context.ctx() job_execution = conductor.job_execution_get(ctx, job_execution_id) if job_execution.info['status'] in edp.JOB_STATUSES_TERMINATED: LOG.info("Job execution is already finished and shouldn't be canceled") return job_execution cluster = conductor.cluster_get(ctx, job_execution.cluster_id) if cluster is None: LOG.info("Cannot cancel this job on a non-existent cluster.") return job_execution engine = get_job_engine(cluster, job_execution) if engine is not None: job_execution = conductor.job_execution_update( ctx, job_execution_id, {'info': {'status': edp.JOB_STATUS_TOBEKILLED}}) timeout = CONF.job_canceling_timeout s_time = timeutils.utcnow() while timeutils.delta_seconds(s_time, timeutils.utcnow()) < timeout: if job_execution.info['status'] not in edp.JOB_STATUSES_TERMINATED: try: job_info = engine.cancel_job(job_execution) except Exception as ex: job_info = None LOG.warning("Error during cancel of job execution: " "{error}".format(error=ex)) if job_info is not None: job_execution = _write_job_status(job_execution, job_info) LOG.info("Job execution was canceled successfully") return job_execution context.sleep(3) job_execution = conductor.job_execution_get( ctx, job_execution_id) if not job_execution:
LOG.info("Job execution was deleted. " "Canceling current operation.") return job_execution else: LOG.info("Job execution status: {status}".format( status=job_execution.info['status'])) return job_execution else: raise e.CancelingFailed(_('Job execution %s was not canceled') % job_execution.id) def get_job_status(job_execution_id): ctx = context.ctx() job_execution = conductor.job_execution_get(ctx, job_execution_id) cluster = conductor.cluster_get(ctx, job_execution.cluster_id) if (cluster is not None and cluster.status == c_u.CLUSTER_STATUS_ACTIVE): engine = get_job_engine(cluster, job_execution) if engine is not None: job_execution = _update_job_status(engine, job_execution) return job_execution def update_job_status(job_execution_id): try: get_job_status(job_execution_id) except Exception as e: LOG.exception("Error during update job execution {job}: {error}" .format(job=job_execution_id, error=e)) def update_job_statuses(cluster_id=None): ctx = context.ctx() kwargs = {'end_time': None} if cluster_id: kwargs.update({'cluster_id': cluster_id}) for je in conductor.job_execution_get_all(ctx, **kwargs): update_job_status(je.id) def get_job_config_hints(job_type): for eng in ENGINES: if job_type in eng.get_supported_job_types(): return eng.get_possible_job_config(job_type) def suspend_job(job_execution_id): ctx = context.ctx() job_execution = conductor.job_execution_get(ctx, job_execution_id) if job_execution.info['status'] not in edp.JOB_STATUSES_SUSPENDIBLE: raise e.SuspendingFailed(_("Suspending operation can not be performed" " on status: {status}").format( status=job_execution.info['status'])) cluster = conductor.cluster_get(ctx, job_execution.cluster_id) engine = get_job_engine(cluster, job_execution) job_execution = conductor.job_execution_update( ctx, job_execution_id, { 'info': {'status': edp.JOB_STATUS_TOBESUSPENDED}}) try: job_info = engine.suspend_job(job_execution) except Exception as ex: job_info = None conductor.job_execution_update( ctx, job_execution_id, {'info': { 'status': edp.JOB_STATUS_SUSPEND_FAILED}}) raise e.SuspendingFailed(_("Error during suspending of job execution: " "{error}").format(error=ex)) if job_info is not None: job_execution = _write_job_status(job_execution, job_info) LOG.info("Job execution was suspended successfully") return job_execution conductor.job_execution_update( ctx, job_execution_id, {'info': { 'status': edp.JOB_STATUS_SUSPEND_FAILED}}) raise e.SuspendingFailed(_("Failed to suspend job execution " "{jid}").format(jid=job_execution_id)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/job_utils.py0000664000175000017500000002351700000000000021103 0ustar00zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
from oslo_config import cfg from oslo_utils import uuidutils import six from sahara import conductor as c from sahara import context from sahara.plugins import base as plugin_base from sahara.service.edp.data_sources import manager as ds_manager from sahara.service.edp.utils import shares as shares_service from sahara.utils import remote opts = [ cfg.StrOpt('job_workflow_postfix', default='', help="Postfix for storing jobs in hdfs. Will be " "added to '/user//' path.") ] CONF = cfg.CONF CONF.register_opts(opts) conductor = c.API # Prefix used to mark data_source name references in arg lists DATA_SOURCE_PREFIX = "datasource://" DATA_SOURCE_SUBST_NAME = "edp.substitute_data_source_for_name" DATA_SOURCE_SUBST_UUID = "edp.substitute_data_source_for_uuid" def get_plugin(cluster): return plugin_base.PLUGINS.get_plugin(cluster.plugin_name) def get_data_source(ds_name): return ds_manager.DATA_SOURCES.get_data_source(ds_name) def create_workflow_dir(where, path, job, use_uuid=None, chmod=""): if use_uuid is None: use_uuid = uuidutils.generate_uuid() constructed_dir = _append_slash_if_needed(path) constructed_dir += '%s/%s' % (job.name, use_uuid) with remote.get_remote(where) as r: if chmod: r.execute_command("mkdir -p -m %s %s" % (chmod, constructed_dir)) else: r.execute_command("mkdir -p %s" % constructed_dir) return constructed_dir def _get_data_source_urls(ds, cluster, job_exec_id): # returns a tuple (native_url, runtime_url) return get_data_source(ds.type).get_urls(ds.url, cluster, job_exec_id) def get_input_output_data_sources(job_execution, job, data_source_urls, cluster=None): def _construct(ctx, ds_id): job_exec_id = job_execution.id source = conductor.data_source_get(ctx, ds_id) if source and source.id not in data_source_urls: data_source_urls[source.id] = _get_data_source_urls(source, cluster, job_exec_id) return source ctx = context.ctx() input_source = _construct(ctx, job_execution.input_id) output_source = _construct(ctx, job_execution.output_id) return input_source, output_source def _append_slash_if_needed(path): if path[-1] != '/': path += '/' return path def may_contain_data_source_refs(job_configs): def _check_data_source_ref_option(option): truth = job_configs and ( job_configs.get('configs', {}).get(option)) # Config values specified in the UI may be # passed as strings return truth in (True, 'True') return ( _check_data_source_ref_option(DATA_SOURCE_SUBST_NAME), _check_data_source_ref_option(DATA_SOURCE_SUBST_UUID)) def _data_source_ref_search(job_configs, func, prune=lambda x: x): """Return a list of unique values in job_configs filtered by func(). Loop over the 'args', 'configs' and 'params' elements in job_configs and return a list of all values for which func(value) is True. Optionally provide a 'prune' function that is applied to values before they are added to the return value. """ args = set([prune(arg) for arg in job_configs.get( 'args', []) if func(arg)]) configs = set([prune(val) for val in six.itervalues( job_configs.get('configs', {})) if func(val)]) params = set([prune(val) for val in six.itervalues( job_configs.get('params', {})) if func(val)]) return list(args | configs | params) def find_possible_data_source_refs_by_name(job_configs): """Find string values in job_configs starting with 'datasource://'. Loop over the 'args', 'configs', and 'params' elements of job_configs to find all values beginning with the prefix 'datasource://'. Return a list of unique values with the prefix removed. 
Note that for 'configs' and 'params', which are dictionaries, only the values are considered and the keys are not relevant. """ def startswith(arg): return isinstance( arg, six.string_types) and arg.startswith(DATA_SOURCE_PREFIX) return _data_source_ref_search(job_configs, startswith, prune=lambda x: x[len(DATA_SOURCE_PREFIX):]) def find_possible_data_source_refs_by_uuid(job_configs): """Find string values in job_configs which are uuids. Return a list of unique values in the 'args', 'configs', and 'params' elements of job_configs which have the form of a uuid. Note that for 'configs' and 'params', which are dictionaries, only the values are considered and the keys are not relevant. """ return _data_source_ref_search(job_configs, uuidutils.is_uuid_like) def resolve_data_source_references(job_configs, job_exec_id, data_source_urls, cluster=None): """Resolve possible data_source references in job_configs. Look for any string values in the 'args', 'configs', and 'params' elements of job_configs which start with 'datasource://' or have the form of a uuid. For values beginning with 'datasource://', strip off the prefix and search for a DataSource object with a name that matches the value. For values having the form of a uuid, search for a DataSource object with an id that matches the value. If a DataSource object is found for the value, replace the value with the URL from the DataSource object. If any DataSource objects are found which reference swift paths and contain credentials, set credential configuration values in job_configs (use the first set of swift credentials found). If no values are resolved, return an empty list and a reference to job_configs. If any values are resolved, return a list of the referenced data_source objects and a copy of job_configs with all of the references replaced with URLs. """ by_name, by_uuid = may_contain_data_source_refs(job_configs) if not (by_name or by_uuid): return [], job_configs ctx = context.ctx() ds_seen = {} new_configs = {} def _resolve(value): kwargs = {} if by_name and isinstance( value, six.string_types) and value.startswith(DATA_SOURCE_PREFIX): value = value[len(DATA_SOURCE_PREFIX):] kwargs['name'] = value elif by_uuid and uuidutils.is_uuid_like(value): kwargs['id'] = value if kwargs: # Name and id are both unique constraints so if there # is more than 1 something is really wrong ds = conductor.data_source_get_all(ctx, **kwargs) if len(ds) == 1: ds = ds[0] ds_seen[ds.id] = ds if ds.id not in data_source_urls: data_source_urls[ds.id] = _get_data_source_urls( ds, cluster, job_exec_id) return data_source_urls[ds.id][1] return value # Loop over configs/params/args and look up each value as a data_source. # If we find it, replace the value. In all cases, we've produced a # copy which is not a FrozenClass type and can be updated. 
new_configs['configs'] = { k: _resolve(v) for k, v in six.iteritems( job_configs.get('configs', {}))} new_configs['params'] = { k: _resolve(v) for k, v in six.iteritems( job_configs.get('params', {}))} new_configs['args'] = [_resolve(a) for a in job_configs.get('args', [])] # If we didn't resolve anything we might as well return the original ds_seen = ds_seen.values() if not ds_seen: return [], job_configs # If there are proxy_configs we'll need to copy these, too, # so job_configs is complete if job_configs.get('proxy_configs'): new_configs['proxy_configs'] = { k: v for k, v in six.iteritems(job_configs.get('proxy_configs'))} return ds_seen, new_configs def prepare_cluster_for_ds(data_sources, cluster, job_configs, ds_urls): for ds in data_sources: if ds: get_data_source(ds.type).prepare_cluster( ds, cluster, job_configs=job_configs, runtime_url=ds_urls[ds.id]) def to_url_dict(data_source_urls, runtime=False): idx = 1 if runtime else 0 return {id: urls[idx] for id, urls in six.iteritems(data_source_urls)} def mount_share_at_default_path(url, cluster): # Automount this share to the cluster with default path # url example: 'manila://ManilaShare-uuid/path_to_file' share_id = six.moves.urllib.parse.urlparse(url).netloc if cluster.shares: cluster_shares = [dict(s) for s in cluster.shares] else: cluster_shares = [] needed_share = { 'id': share_id, 'path': shares_service.default_mount(share_id), 'access_level': 'rw' } cluster_shares.append(needed_share) cluster = conductor.cluster_update( context.ctx(), cluster, {'shares': cluster_shares}) shares_service.mount_shares(cluster) return shares_service.get_share_path(url, cluster.shares) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.737891 sahara-16.0.0/sahara/service/edp/oozie/0000775000175000017500000000000000000000000017654 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/oozie/__init__.py0000664000175000017500000000000000000000000021753 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/oozie/engine.py0000664000175000017500000004525600000000000021507 0ustar00zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
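# Illustrative job_configs (names and the uuid are made up) that trigger the
# data source substitution described in resolve_data_source_references() above.
_example_job_configs = {
    'configs': {
        'edp.substitute_data_source_for_name': True,
        'edp.substitute_data_source_for_uuid': True,
    },
    'args': [
        'datasource://my-input',                    # looked up by name
        '1fa304da-6e9b-4bb4-9a22-7c34c1a0f3f1',     # looked up by id (uuid-like)
        '/values/without/a/match/stay/unchanged',
    ],
}
# Each value that matches a stored DataSource is replaced with that data
# source's runtime URL, and a modifiable copy of job_configs is returned
# together with the list of DataSource objects that were found.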
import abc import os import xml.dom.minidom as xml from oslo_config import cfg from oslo_utils import uuidutils import six from sahara import conductor as c from sahara import context from sahara.service.edp import base_engine from sahara.service.edp import hdfs_helper as h from sahara.service.edp.job_binaries import manager as jb_manager from sahara.service.edp import job_utils from sahara.service.edp.oozie import oozie as o from sahara.service.edp.oozie.workflow_creator import workflow_factory from sahara.service.validations.edp import job_execution as j from sahara.utils import edp from sahara.utils import remote from sahara.utils import xmlutils as x CONF = cfg.CONF conductor = c.API @six.add_metaclass(abc.ABCMeta) class OozieJobEngine(base_engine.JobEngine): def __init__(self, cluster): self.cluster = cluster self.plugin = job_utils.get_plugin(self.cluster) def get_remote_client(self): return o.RemoteOozieClient(self.get_oozie_server_uri(self.cluster), self.get_oozie_server(self.cluster), self.get_hdfs_user()) def get_client(self): # by default engine will return standard oozie client implementation return o.OozieClient(self.get_oozie_server_uri(self.cluster), self.get_oozie_server(self.cluster)) def _get_oozie_job_params(self, hdfs_user, path_to_workflow, oozie_params, use_hbase_lib, scheduled_params=None, job_dir=None, job_execution_type=None): oozie_libpath_key = "oozie.libpath" oozie_libpath = "" rm_path = self.get_resource_manager_uri(self.cluster) nn_path = self.get_name_node_uri(self.cluster) hbase_common_lib_path = "%s%s" % (nn_path, h.HBASE_COMMON_LIB_PATH) if use_hbase_lib: if oozie_libpath_key in oozie_params: oozie_libpath = "%s,%s" % (oozie_params.get(oozie_libpath_key, ""), hbase_common_lib_path) else: oozie_libpath = hbase_common_lib_path if job_execution_type == "scheduled": app_path = "oozie.coord.application.path" job_parameters = { "start": scheduled_params.get('start'), "end": scheduled_params.get('end'), "frequency": scheduled_params.get('frequency'), "workflowAppUri": "%s%s" % (nn_path, job_dir), app_path: "%s%s" % (nn_path, job_dir)} else: app_path = "oozie.wf.application.path" job_parameters = { app_path: "%s%s" % (nn_path, path_to_workflow)} job_parameters["nameNode"] = nn_path job_parameters["user.name"] = hdfs_user job_parameters["jobTracker"] = rm_path job_parameters[oozie_libpath_key] = oozie_libpath job_parameters["oozie.use.system.libpath"] = "true" # Don't let the application path be overwritten, that can't # possibly make any sense if app_path in oozie_params: del oozie_params[app_path] if oozie_libpath_key in oozie_params: del oozie_params[oozie_libpath_key] job_parameters.update(oozie_params) return job_parameters def _upload_workflow_file(self, where, job_dir, wf_xml, hdfs_user): with remote.get_remote(where) as r: h.put_file_to_hdfs(r, wf_xml, "workflow.xml", job_dir, hdfs_user) return "%s/workflow.xml" % job_dir def _upload_coordinator_file(self, where, job_dir, wf_xml, hdfs_user): with remote.get_remote(where) as r: h.put_file_to_hdfs(r, wf_xml, "coordinator.xml", job_dir, hdfs_user) return "%s/coordinator.xml" % job_dir def cancel_job(self, job_execution): if job_execution.engine_job_id is not None: client = self.get_client() client.kill_job(job_execution) return client.get_job_info(job_execution) def get_job_status(self, job_execution): if job_execution.engine_job_id is not None: return self.get_client().get_job_info(job_execution) def _prepare_run_job(self, job_execution): ctx = context.ctx() # This will be a dictionary of tuples, (native_url, 
runtime_url) # keyed by data_source id data_source_urls = {} prepared_job_params = {} job = conductor.job_get(ctx, job_execution.job_id) input_source, output_source = job_utils.get_input_output_data_sources( job_execution, job, data_source_urls, self.cluster) # Updated_job_configs will be a copy of job_execution.job_configs with # any name or uuid references to data_sources resolved to paths # assuming substitution is enabled. # If substitution is not enabled then updated_job_configs will # just be a reference to job_execution.job_configs to avoid a copy. # Additional_sources will be a list of any data_sources found. additional_sources, updated_job_configs = ( job_utils.resolve_data_source_references(job_execution.job_configs, job_execution.id, data_source_urls, self.cluster) ) job_execution = conductor.job_execution_update( ctx, job_execution, {"data_source_urls": job_utils.to_url_dict(data_source_urls)}) # Now that we've recorded the native urls, we can switch to the # runtime urls data_source_urls = job_utils.to_url_dict(data_source_urls, runtime=True) data_sources = additional_sources + [input_source, output_source] job_utils.prepare_cluster_for_ds(data_sources, self.cluster, updated_job_configs, data_source_urls) proxy_configs = updated_job_configs.get('proxy_configs') configs = updated_job_configs.get('configs', {}) use_hbase_lib = configs.get('edp.hbase_common_lib', {}) # Extract all the 'oozie.' configs so that they can be set in the # job properties file. These are config values for Oozie itself, # not the job code oozie_params = {} for k in list(configs): if k.startswith('oozie.'): oozie_params[k] = configs[k] external_hdfs_urls = self._resolve_external_hdfs_urls( job_execution.job_configs) for url in external_hdfs_urls: h.configure_cluster_for_hdfs(self.cluster, url) hdfs_user = self.get_hdfs_user() # TODO(tmckay): this should probably be "get_namenode" # but that call does not exist in the oozie engine api now. 
oozie_server = self.get_oozie_server(self.cluster) wf_dir = self._create_hdfs_workflow_dir(oozie_server, job) self._upload_job_files_to_hdfs(oozie_server, wf_dir, job, configs, proxy_configs) wf_xml = workflow_factory.get_workflow_xml( job, self.cluster, updated_job_configs, input_source, output_source, hdfs_user, data_source_urls) path_to_workflow = self._upload_workflow_file(oozie_server, wf_dir, wf_xml, hdfs_user) prepared_job_params['context'] = ctx prepared_job_params['hdfs_user'] = hdfs_user prepared_job_params['path_to_workflow'] = path_to_workflow prepared_job_params['use_hbase_lib'] = use_hbase_lib prepared_job_params['job_execution'] = job_execution prepared_job_params['oozie_params'] = oozie_params prepared_job_params['wf_dir'] = wf_dir prepared_job_params['oozie_server'] = oozie_server return prepared_job_params def run_job(self, job_execution): prepared_job_params = self._prepare_run_job(job_execution) path_to_workflow = prepared_job_params['path_to_workflow'] hdfs_user = prepared_job_params['hdfs_user'] oozie_params = prepared_job_params['oozie_params'] use_hbase_lib = prepared_job_params['use_hbase_lib'] ctx = prepared_job_params['context'] job_execution = prepared_job_params['job_execution'] job_params = self._get_oozie_job_params(hdfs_user, path_to_workflow, oozie_params, use_hbase_lib) client = self.get_client() oozie_job_id = client.add_job(x.create_hadoop_xml(job_params), job_execution) job_execution = conductor.job_execution_get(ctx, job_execution.id) if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED: return (None, edp.JOB_STATUS_KILLED, None) conductor.job_execution_update( context.ctx(), job_execution.id, {'info': {'status': edp.JOB_STATUS_READYTORUN}, 'engine_job_id': oozie_job_id}) client.run_job(job_execution, oozie_job_id) try: status = client.get_job_info(job_execution, oozie_job_id)['status'] except Exception: status = None return (oozie_job_id, status, None) def run_scheduled_job(self, job_execution): prepared_job_params = self._prepare_run_job(job_execution) oozie_server = prepared_job_params['oozie_server'] wf_dir = prepared_job_params['wf_dir'] hdfs_user = prepared_job_params['hdfs_user'] oozie_params = prepared_job_params['oozie_params'] use_hbase_lib = prepared_job_params['use_hbase_lib'] ctx = prepared_job_params['context'] job_execution = prepared_job_params['job_execution'] coord_configs = {"jobTracker": "${jobTracker}", "nameNode": "${nameNode}"} coord_xml = self._create_coordinator_xml(coord_configs) self._upload_coordinator_file(oozie_server, wf_dir, coord_xml, hdfs_user) job_params = self._get_oozie_job_params( hdfs_user, None, oozie_params, use_hbase_lib, job_execution.job_configs.job_execution_info, wf_dir, "scheduled") client = self.get_client() oozie_job_id = client.add_job(x.create_hadoop_xml(job_params), job_execution) job_execution = conductor.job_execution_get(ctx, job_execution.id) if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED: return (None, edp.JOB_STATUS_KILLED, None) try: status = client.get_job_status(job_execution, oozie_job_id)['status'] except Exception: status = None return (oozie_job_id, status, None) @abc.abstractmethod def get_hdfs_user(self): pass @abc.abstractmethod def create_hdfs_dir(self, remote, dir_name): pass @abc.abstractmethod def get_oozie_server_uri(self, cluster): pass @abc.abstractmethod def get_oozie_server(self, cluster): pass @abc.abstractmethod def get_name_node_uri(self, cluster): pass @abc.abstractmethod def get_resource_manager_uri(self, cluster): pass def 
validate_job_execution(self, cluster, job, data): # Shell job type requires no specific fields if job.type == edp.JOB_TYPE_SHELL: return # All other types except Java require input and output # objects and Java require main class if job.type == edp.JOB_TYPE_JAVA: j.check_main_class_present(data, job) else: j.check_data_sources(data, job) job_type, subtype = edp.split_job_type(job.type) if job_type == edp.JOB_TYPE_MAPREDUCE and ( subtype == edp.JOB_SUBTYPE_STREAMING): j.check_streaming_present(data, job) @staticmethod def get_possible_job_config(job_type): return workflow_factory.get_possible_job_config(job_type) @staticmethod def get_supported_job_types(): return [edp.JOB_TYPE_HIVE, edp.JOB_TYPE_JAVA, edp.JOB_TYPE_MAPREDUCE, edp.JOB_TYPE_MAPREDUCE_STREAMING, edp.JOB_TYPE_PIG, edp.JOB_TYPE_SHELL] def _prepare_job_binaries(self, job_binaries, r): for jb in job_binaries: jb_manager.JOB_BINARIES.get_job_binary_by_url(jb.url). \ prepare_cluster(jb, remote=r) def _upload_job_files_to_hdfs(self, where, job_dir, job, configs, proxy_configs=None): mains = list(job.mains) if job.mains else [] libs = list(job.libs) if job.libs else [] builtin_libs = edp.get_builtin_binaries(job, configs) uploaded_paths = [] hdfs_user = self.get_hdfs_user() job_dir_suffix = 'lib' if job.type != edp.JOB_TYPE_SHELL else '' lib_dir = os.path.join(job_dir, job_dir_suffix) with remote.get_remote(where) as r: job_binaries = mains + libs self._prepare_job_binaries(job_binaries, r) # upload mains uploaded_paths.extend(self._upload_job_binaries(r, mains, proxy_configs, hdfs_user, job_dir)) # upload libs if len(libs) and job_dir_suffix: # HDFS 2.2.0 fails to put file if the lib dir does not exist self.create_hdfs_dir(r, lib_dir) uploaded_paths.extend(self._upload_job_binaries(r, libs, proxy_configs, hdfs_user, lib_dir)) # upload buitin_libs for lib in builtin_libs: h.put_file_to_hdfs(r, lib['raw'], lib['name'], lib_dir, hdfs_user) uploaded_paths.append(lib_dir + lib['name']) return uploaded_paths def _upload_job_binaries(self, r, job_binaries, proxy_configs, hdfs_user, job_dir): uploaded_paths = [] for jb in job_binaries: path = jb_manager.JOB_BINARIES. \ get_job_binary_by_url(jb.url). 
\ copy_binary_to_cluster(jb, proxy_configs=proxy_configs, remote=r, context=context.ctx()) h.copy_from_local(r, path, job_dir, hdfs_user) uploaded_paths.append(path) return uploaded_paths def _create_hdfs_workflow_dir(self, where, job): constructed_dir = '/user/%s/' % self.get_hdfs_user() constructed_dir = self._add_postfix(constructed_dir) constructed_dir += '%s/%s' % (job.name, uuidutils.generate_uuid()) with remote.get_remote(where) as r: self.create_hdfs_dir(r, constructed_dir) return constructed_dir def _create_coordinator_xml(self, coord_configs, config_filter=None, appname='coord'): doc = xml.Document() # Create the base element coord = doc.createElement('coordinator-app') coord.attributes['name'] = appname coord.attributes['start'] = "${start}" coord.attributes['end'] = "${end}" coord.attributes['frequency'] = "${frequency}" coord.attributes['timezone'] = 'UTC' coord.attributes['xmlns'] = 'uri:oozie:coordinator:0.2' doc.appendChild(coord) action = doc.createElement('action') workflow = doc.createElement('workflow') coord.appendChild(action) action.appendChild(workflow) x.add_text_element_to_tag(doc, "workflow", 'app-path', "${workflowAppUri}") configuration = doc.createElement('configuration') workflow.appendChild(configuration) default_configs = [] if config_filter is not None: default_configs = [cfg['name'] for cfg in config_filter] for name in sorted(coord_configs): if name in default_configs or config_filter is None: x.add_property_to_configuration(doc, name, coord_configs[name]) # Return newly created XML return doc.toprettyxml(indent=" ") def _add_postfix(self, constructed_dir): def _append_slash_if_needed(path): if path[-1] != '/': path += '/' return path constructed_dir = _append_slash_if_needed(constructed_dir) if CONF.job_workflow_postfix: constructed_dir = ''.join([str(constructed_dir), str(CONF.job_workflow_postfix)]) return _append_slash_if_needed(constructed_dir) def _resolve_external_hdfs_urls(self, job_configs): external_hdfs_urls = [] for k, v in six.iteritems(job_configs.get('configs', {})): if isinstance(v, six.string_types) and v.startswith("hdfs://"): external_hdfs_urls.append(v) for k, v in six.iteritems(job_configs.get('params', {})): if isinstance(v, six.string_types) and v.startswith("hdfs://"): external_hdfs_urls.append(v) for v in job_configs.get('args', []): if isinstance(v, six.string_types) and v.startswith("hdfs://"): external_hdfs_urls.append(v) return external_hdfs_urls def suspend_job(self, job_execution): return self._manage_job(job_execution, edp.JOB_ACTION_SUSPEND) def _manage_job(self, job_execution, action): if job_execution.oozie_job_id is not None: client = self.get_client() if action == edp.JOB_ACTION_SUSPEND: client.suspend_job(job_execution) return client.get_job_status(job_execution) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/oozie/oozie.py0000664000175000017500000001225700000000000021362 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
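# Roughly the coordinator.xml that _create_coordinator_xml() above produces for
# coord_configs={"jobTracker": "${jobTracker}", "nameNode": "${nameNode}"}
# (sketch based on the element construction shown; exact whitespace differs):
#
#   <coordinator-app name="coord" start="${start}" end="${end}"
#                    frequency="${frequency}" timezone="UTC"
#                    xmlns="uri:oozie:coordinator:0.2">
#     <action>
#       <workflow>
#         <app-path>${workflowAppUri}</app-path>
#         <configuration>
#           <property><name>jobTracker</name><value>${jobTracker}</value></property>
#           <property><name>nameNode</name><value>${nameNode}</value></property>
#         </configuration>
#       </workflow>
#     </action>
#   </coordinator-app>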
# See the License for the specific language governing permissions and # limitations under the License. import abc import re from oslo_serialization import jsonutils as json from oslo_utils import uuidutils import six from six.moves.urllib import parse as urlparse import sahara.exceptions as ex @six.add_metaclass(abc.ABCMeta) class BaseOozieClient(object): def __init__(self, url, oozie_server): self.job_url = url + "/v2/job/%s" self.jobs_url = url + "/v2/jobs" self.oozie_server = oozie_server self.port = urlparse.urlparse(url).port @abc.abstractmethod def add_job(self, job_config, job_execution): pass @abc.abstractmethod def manage_job(self, job_execution, action, job_id=None): pass @abc.abstractmethod def get_job_info(self, job_execution, job_id=None): pass def kill_job(self, job_execution): self.manage_job(job_execution, 'kill') def run_job(self, job_execution, job_id): self.manage_job(job_execution, 'start', job_id=job_id) class OozieClient(BaseOozieClient): def add_job(self, job_config, job_execution): return self.post( job_execution, self.jobs_url, data=job_config, headers={ "Content-Type": "application/xml;charset=UTF-8"}) def manage_job(self, job_execution, action, job_id=None): job_id = job_id if job_id else job_execution.engine_job_id url = self.job_url % job_id + "?action=" + action self.put(job_execution, url) def get_job_info(self, job_execution, job_id=None): job_id = job_id if job_id else job_execution.engine_job_id url = self.job_url % job_id + "?show=info" return self.get(job_execution, url) def _get_http_session(self, info=None): return self.oozie_server.remote().get_http_client(self.port, info=info) def post(self, job_execution, url, data, headers): session = self._get_http_session(job_execution.extra.get('neutron')) resp = session.post(url, data=data, headers=headers) _check_status_code(resp, 201) return get_json(resp)['id'] def put(self, job_execution, url): session = self._get_http_session(job_execution.extra.get('neutron')) resp = session.put(url) _check_status_code(resp, 200) def get(self, job_execution, url): session = self._get_http_session(job_execution.extra.get('neutron')) resp = session.get(url) _check_status_code(resp, 200) return get_json(resp) class RemoteOozieClient(OozieClient): def __init__(self, url, oozie_server, hdfs_user): self.hdfs_user = hdfs_user self.oozie_url = url.replace( urlparse.urlparse(url).hostname, oozie_server.fqdn()) super(RemoteOozieClient, self).__init__(url, oozie_server) def _oozie(self, cmd): return ( "sudo su - -c 'oozie -Doozie.auth.token.cache=false " "{cmd} -oozie {oozie}' {user}".format( cmd=cmd, oozie=self.oozie_url, user=self.hdfs_user)) def add_job(self, job_config, job_execution): with self.oozie_server.remote() as r: name = "/tmp/%s.xml" % uuidutils.generate_uuid()[:8] r.write_file_to(name, job_config) cmd = self._oozie("job -submit -config %s" % name) cmd += " | awk '{ print $2 }'" code, stdout = r.execute_command(cmd) stdout = stdout.strip() return stdout def manage_job(self, job_execution, action, job_id=None): job_id = job_id if job_id else job_execution.engine_job_id cmd = self._oozie("job -%s %s" % (action, job_id)) with self.oozie_server.remote() as r: r.execute_command(cmd) def get_job_info(self, job_execution, job_id=None): job_id = job_id if job_id else job_execution.engine_job_id cmd = self._oozie("job -info %s" % job_id) cmd += " | grep Status | head -n 1 | awk '{ print $3 }'" with self.oozie_server.remote() as r: code, stdout = r.execute_command(cmd) return {'status': stdout.strip()} def 
_check_status_code(resp, expected_code):
    if resp.status_code != expected_code:
        resp_text = resp.text
        # cleaning tomcat error message
        message = resp_text.split("<HR size=\"1\" noshade=\"noshade\">")[1]
        message = message.replace("</p><p>
", "\n") message = re.sub('<[^<]+?>', ' ', message) raise ex.OozieException(message) def get_json(response): """Provides backward compatibility for old versions of requests library.""" json_field_or_function = getattr(response, 'json', None) if callable(json_field_or_function): return response.json() else: return json.loads(response.content) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.737891 sahara-16.0.0/sahara/service/edp/oozie/workflow_creator/0000775000175000017500000000000000000000000023245 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/oozie/workflow_creator/__init__.py0000664000175000017500000000000000000000000025344 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/oozie/workflow_creator/base_workflow.py0000664000175000017500000000636600000000000026476 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import xml.dom.minidom as xml import sahara.exceptions as ex from sahara.i18n import _ from sahara.utils import xmlutils as x class OozieWorkflowCreator(object): doc = None tag_name = "no-op" def __init__(self, name): self.doc = x.load_xml_document("service/edp/resources/workflow.xml", strip=True) self.tag_name = name x.add_child(self.doc, 'action', self.tag_name) ok_elem = xml.parseString('<%s to="%s"/>' % ("ok", "end")) x.add_element(self.doc, 'action', ok_elem.firstChild) error_elem = xml.parseString('<%s to="%s"/>' % ("error", "fail")) x.add_element(self.doc, 'action', error_elem.firstChild) x.add_text_element_to_tag(self.doc, self.tag_name, 'job-tracker', "${jobTracker}") x.add_text_element_to_tag(self.doc, self.tag_name, 'name-node', "${nameNode}") def _add_to_prepare_element(self, element, paths): if element not in ['delete', 'mkdir']: raise ex.NotFoundException(element, _('"%s" child cannot be ' 'added to prepare element')) prop = x.get_and_create_if_not_exist(self.doc, self.tag_name, 'prepare') for path in paths: elem = xml.parseString('<%s path="%s"/>' % (element, path)) prop.appendChild(elem.firstChild) def _add_to_streaming_element(self, element, path): if element not in ['mapper', 'reducer']: raise ex.NotFoundException(element, _('"%s" child cannot be added ' 'to streaming element')) x.get_and_create_if_not_exist(self.doc, self.tag_name, 'streaming') x.add_text_element_to_tag(self.doc, 'streaming', element, path) def _add_configuration_elements(self, configuration): if configuration: x.add_properties_to_configuration(self.doc, self.tag_name, configuration) def _add_job_xml_element(self, job_xml): if job_xml: x.add_text_element_to_tag(self.doc, self.tag_name, 'job-xml', job_xml) def _add_files_and_archives(self, files, archives): if files: x.add_tagged_list(self.doc, self.tag_name, 'file', files) if archives: 
x.add_tagged_list(self.doc, self.tag_name, 'archive', archives) def get_built_workflow_xml(self): return self.doc.toprettyxml(indent=" ") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/oozie/workflow_creator/hive_workflow.py0000664000175000017500000000325500000000000026511 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.service.edp.oozie.workflow_creator import base_workflow from sahara.utils import xmlutils as x class HiveWorkflowCreator(base_workflow.OozieWorkflowCreator): def __init__(self): super(HiveWorkflowCreator, self).__init__('hive') hive_elem = self.doc.getElementsByTagName('hive')[0] hive_elem.setAttribute('xmlns', 'uri:oozie:hive-action:0.2') def build_workflow_xml(self, script, job_xml, prepare=None, configuration=None, params=None, files=None, archives=None): prepare = prepare or {} params = params or {} files = files or [] archives = archives or [] for k in sorted(prepare): self._add_to_prepare_element(k, prepare[k]) self._add_job_xml_element(job_xml) self._add_configuration_elements(configuration) x.add_text_element_to_tag(self.doc, self.tag_name, 'script', script) x.add_equal_separated_dict(self.doc, self.tag_name, 'param', params) self._add_files_and_archives(files, archives) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/oozie/workflow_creator/java_workflow.py0000664000175000017500000000366100000000000026500 0ustar00zuulzuul00000000000000# Copyright (c) 2013 RedHat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
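# Illustrative sketch (not part of the sahara source): how a creator such as
# HiveWorkflowCreator above is typically driven.  build_workflow_xml()
# populates the action element and get_built_workflow_xml() returns the
# pretty-printed workflow definition.  The script name, job-xml path and the
# parameter values are made-up examples; running this assumes the sahara
# package and its bundled workflow.xml resource are importable.
from sahara.service.edp.oozie.workflow_creator import hive_workflow

def build_sample_hive_workflow():
    creator = hive_workflow.HiveWorkflowCreator()
    creator.build_workflow_xml(
        script='my_script.q',                        # uploaded Hive script
        job_xml='/user/hadoop/conf/hive-site.xml',   # shared Hive configuration
        configuration={'mapred.reduce.tasks': '-1'},
        params={'INPUT': 'swift://container.sahara/input',
                'OUTPUT': 'swift://container.sahara/output'})
    return creator.get_built_workflow_xml()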
from sahara.service.edp.oozie.workflow_creator import base_workflow from sahara.utils import xmlutils as x class JavaWorkflowCreator(base_workflow.OozieWorkflowCreator): def __init__(self): super(JavaWorkflowCreator, self).__init__('java') def build_workflow_xml(self, main_class, prepare=None, job_xml=None, configuration=None, java_opts=None, arguments=None, files=None, archives=None): prepare = prepare or {} arguments = arguments or [] files = files or [] archives = archives or [] for k in sorted(prepare): self._add_to_prepare_element(k, prepare[k]) self._add_job_xml_element(job_xml) self._add_configuration_elements(configuration) x.add_text_element_to_tag(self.doc, self.tag_name, 'main-class', main_class) if java_opts: x.add_text_element_to_tag(self.doc, self.tag_name, 'java-opts', java_opts) for arg in arguments: x.add_text_element_to_tag(self.doc, self.tag_name, 'arg', arg) self._add_files_and_archives(files, archives) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/oozie/workflow_creator/mapreduce_workflow.py0000664000175000017500000000301700000000000027517 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.service.edp.oozie.workflow_creator import base_workflow class MapReduceWorkFlowCreator(base_workflow.OozieWorkflowCreator): def __init__(self): super(MapReduceWorkFlowCreator, self).__init__('map-reduce') def build_workflow_xml(self, prepare=None, job_xml=None, configuration=None, files=None, archives=None, streaming=None): prepare = prepare or {} files = files or [] archives = archives or [] streaming = streaming or {} for k in sorted(prepare): self._add_to_prepare_element(k, prepare[k]) # TODO(aignatov): Need to add PIPES workflow for k in sorted(streaming): self._add_to_streaming_element(k, streaming[k]) self._add_job_xml_element(job_xml) self._add_configuration_elements(configuration) self._add_files_and_archives(files, archives) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/oozie/workflow_creator/pig_workflow.py0000664000175000017500000000333200000000000026331 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
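# Illustrative sketch (not part of the sahara source): building a streaming
# map-reduce workflow with MapReduceWorkFlowCreator defined above.  Only the
# 'mapper' and 'reducer' keys are accepted by _add_to_streaming_element; the
# commands and directories below are made-up examples, and the sahara package
# is assumed to be importable.
from sahara.service.edp.oozie.workflow_creator import mapreduce_workflow

def build_sample_streaming_workflow():
    creator = mapreduce_workflow.MapReduceWorkFlowCreator()
    creator.build_workflow_xml(
        configuration={'mapred.input.dir': '/user/hadoop/input',
                       'mapred.output.dir': '/user/hadoop/output'},
        streaming={'mapper': '/bin/cat', 'reducer': '/usr/bin/wc'})
    return creator.get_built_workflow_xml()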
from sahara.service.edp.oozie.workflow_creator import base_workflow from sahara.utils import xmlutils as x class PigWorkflowCreator(base_workflow.OozieWorkflowCreator): def __init__(self): super(PigWorkflowCreator, self).__init__('pig') def build_workflow_xml(self, script_name, prepare=None, job_xml=None, configuration=None, params=None, arguments=None, files=None, archives=None): prepare = prepare or {} params = params or {} arguments = arguments or [] files = files or [] archives = archives or [] for k in sorted(prepare): self._add_to_prepare_element(k, prepare[k]) self._add_job_xml_element(job_xml) self._add_configuration_elements(configuration) x.add_text_element_to_tag(self.doc, self.tag_name, 'script', script_name) x.add_equal_separated_dict(self.doc, self.tag_name, 'param', params) for arg in arguments: x.add_text_element_to_tag(self.doc, self.tag_name, 'argument', arg) self._add_files_and_archives(files, archives) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/oozie/workflow_creator/shell_workflow.py0000664000175000017500000000344000000000000026661 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.service.edp.oozie.workflow_creator import base_workflow from sahara.utils import xmlutils as x class ShellWorkflowCreator(base_workflow.OozieWorkflowCreator): SHELL_XMLNS = {"xmlns": "uri:oozie:shell-action:0.1"} def __init__(self): super(ShellWorkflowCreator, self).__init__('shell') def build_workflow_xml(self, script_name, prepare=None, job_xml=None, configuration=None, env_vars=None, arguments=None, files=None): x.add_attributes_to_element(self.doc, self.tag_name, self.SHELL_XMLNS) prepare = prepare or {} env_vars = env_vars or {} arguments = arguments or [] files = files or [] for k in sorted(prepare): self._add_to_prepare_element(k, prepare[k]) self._add_configuration_elements(configuration) x.add_text_element_to_tag(self.doc, self.tag_name, 'exec', script_name) for arg in arguments: x.add_text_element_to_tag(self.doc, self.tag_name, 'argument', arg) x.add_equal_separated_dict(self.doc, self.tag_name, 'env-var', env_vars) self._add_files_and_archives(files + [script_name], []) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/oozie/workflow_creator/workflow_factory.py0000664000175000017500000003434000000000000027224 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg import six from sahara import conductor as c from sahara import context from sahara.service.castellan import utils as key_manager from sahara.service.edp.oozie.workflow_creator import hive_workflow from sahara.service.edp.oozie.workflow_creator import java_workflow from sahara.service.edp.oozie.workflow_creator import mapreduce_workflow from sahara.service.edp.oozie.workflow_creator import pig_workflow from sahara.service.edp.oozie.workflow_creator import shell_workflow from sahara.service.edp import s3_common from sahara.swift import swift_helper as sw from sahara.swift import utils as su from sahara.utils import edp from sahara.utils import xmlutils conductor = c.API CONF = cfg.CONF class BaseFactory(object): def _separate_edp_configs(self, job_dict): configs = {} edp_configs = {} if 'configs' in job_dict: for k, v in six.iteritems(job_dict['configs']): if k.startswith('edp.'): edp_configs[k] = v elif not k.startswith('oozie.'): # 'oozie.' configs have been written to the properties file configs[k] = v return configs, edp_configs def _prune_edp_configs(self, job_dict): if job_dict is None: return {}, {} # Rather than copy.copy, we make this by hand # to avoid FrozenClassError when we update 'configs' pruned_job_dict = {} for k, v in six.iteritems(job_dict): pruned_job_dict[k] = v # Separate out "edp." configs into its own dictionary configs, edp_configs = self._separate_edp_configs(job_dict) # Prune the new job_dict so it does not hold "edp." configs pruned_job_dict['configs'] = configs return pruned_job_dict, edp_configs def _update_dict(self, dest, src): if src is not None: for key, value in six.iteritems(dest): if hasattr(value, "update"): new_vals = src.get(key, {}) value.update(new_vals) def update_job_dict(self, job_dict, exec_dict): pruned_exec_dict, edp_configs = self._prune_edp_configs(exec_dict) self._update_dict(job_dict, pruned_exec_dict) # Add the separated "edp." configs to the job_dict job_dict['edp_configs'] = edp_configs # Args are listed, not named. Simply replace them. 
job_dict['args'] = pruned_exec_dict.get('args', []) # Find all swift:// paths in args, configs, and params and # add the .sahara suffix to the container if it is not there # already job_dict['args'] = [ # TODO(tmckay) args for Pig can actually be -param name=value # and value could conceivably contain swift paths su.inject_swift_url_suffix(arg) for arg in job_dict['args']] for k, v in six.iteritems(job_dict.get('configs', {})): job_dict['configs'][k] = su.inject_swift_url_suffix(v) for k, v in six.iteritems(job_dict.get('params', {})): job_dict['params'][k] = su.inject_swift_url_suffix(v) def get_configs(self, input_data, output_data, proxy_configs=None): configs = {} if proxy_configs: configs[sw.HADOOP_SWIFT_USERNAME] = proxy_configs.get( 'proxy_username') configs[sw.HADOOP_SWIFT_PASSWORD] = key_manager.get_secret( proxy_configs.get('proxy_password')) configs[sw.HADOOP_SWIFT_TRUST_ID] = proxy_configs.get( 'proxy_trust_id') configs[sw.HADOOP_SWIFT_DOMAIN_NAME] = CONF.proxy_user_domain_name return configs for src in (input_data, output_data): if src.type == "swift" and hasattr(src, "credentials"): if "user" in src.credentials: configs[sw.HADOOP_SWIFT_USERNAME] = src.credentials['user'] if "password" in src.credentials: configs[sw.HADOOP_SWIFT_PASSWORD] = ( key_manager.get_secret(src.credentials['password'])) break for src in (input_data, output_data): if src.type == "s3" and hasattr(src, "credentials"): if "accesskey" in src.credentials: configs[s3_common.S3_ACCESS_KEY_CONFIG] = ( src.credentials['accesskey']) if "secretkey" in src.credentials: configs[s3_common.S3_SECRET_KEY_CONFIG] = ( key_manager.get_secret(src.credentials['secretkey'])) if "endpoint" in src.credentials: configs[s3_common.S3_ENDPOINT_CONFIG] = ( src.credentials['endpoint']) if "bucket_in_path" in src.credentials: configs[s3_common.S3_BUCKET_IN_PATH_CONFIG] = ( src.credentials['bucket_in_path']) if "ssl" in src.credentials: configs[s3_common.S3_SSL_CONFIG] = ( src.credentials['ssl']) break return configs def get_params(self, input_data, output_data, data_source_urls): return {'INPUT': data_source_urls[input_data.id], 'OUTPUT': data_source_urls[output_data.id]} class PigFactory(BaseFactory): def __init__(self, job): super(PigFactory, self).__init__() self.name = self.get_script_name(job) def get_script_name(self, job): return conductor.job_main_name(context.ctx(), job) def get_workflow_xml(self, cluster, job_configs, input_data, output_data, hdfs_user, data_source_urls): proxy_configs = job_configs.get('proxy_configs') job_dict = {'configs': self.get_configs(input_data, output_data, proxy_configs), 'params': self.get_params(input_data, output_data, data_source_urls), 'args': []} self.update_job_dict(job_dict, job_configs) creator = pig_workflow.PigWorkflowCreator() creator.build_workflow_xml(self.name, configuration=job_dict['configs'], params=job_dict['params'], arguments=job_dict['args']) return creator.get_built_workflow_xml() class HiveFactory(BaseFactory): def __init__(self, job): super(HiveFactory, self).__init__() self.name = self.get_script_name(job) def get_script_name(self, job): return conductor.job_main_name(context.ctx(), job) def get_workflow_xml(self, cluster, job_configs, input_data, output_data, hdfs_user, data_source_urls): proxy_configs = job_configs.get('proxy_configs') job_dict = {'configs': self.get_configs(input_data, output_data, proxy_configs), 'params': self.get_params(input_data, output_data, data_source_urls)} self.update_job_dict(job_dict, job_configs) creator = 
hive_workflow.HiveWorkflowCreator() creator.build_workflow_xml(self.name, edp.get_hive_shared_conf_path(hdfs_user), configuration=job_dict['configs'], params=job_dict['params']) return creator.get_built_workflow_xml() class MapReduceFactory(BaseFactory): def get_configs(self, input_data, output_data, proxy_configs, data_source_urls): configs = super(MapReduceFactory, self).get_configs(input_data, output_data, proxy_configs) configs['mapred.input.dir'] = data_source_urls[input_data.id] configs['mapred.output.dir'] = data_source_urls[output_data.id] return configs def _get_streaming(self, job_dict): prefix = 'edp.streaming.' return {k[len(prefix):]: v for (k, v) in six.iteritems( job_dict['edp_configs']) if k.startswith(prefix)} def get_workflow_xml(self, cluster, job_configs, input_data, output_data, hdfs_user, data_source_urls): proxy_configs = job_configs.get('proxy_configs') job_dict = {'configs': self.get_configs(input_data, output_data, proxy_configs, data_source_urls)} self.update_job_dict(job_dict, job_configs) creator = mapreduce_workflow.MapReduceWorkFlowCreator() creator.build_workflow_xml(configuration=job_dict['configs'], streaming=self._get_streaming(job_dict)) return creator.get_built_workflow_xml() class JavaFactory(BaseFactory): def _get_java_configs(self, job_dict): main_class = job_dict['edp_configs']['edp.java.main_class'] java_opts = job_dict['edp_configs'].get('edp.java.java_opts', None) args = job_dict['args'] if edp.is_adapt_for_oozie_enabled(job_dict['edp_configs']): if args: args = [main_class] + args else: args = [main_class] main_class = 'org.openstack.sahara.edp.MainWrapper' return main_class, java_opts, args def get_configs(self, proxy_configs=None): # TODO(jfreud): allow s3 and non-proxy swift configs here? configs = {} if proxy_configs: configs[sw.HADOOP_SWIFT_USERNAME] = proxy_configs.get( 'proxy_username') configs[sw.HADOOP_SWIFT_PASSWORD] = key_manager.get_secret( proxy_configs.get('proxy_password')) configs[sw.HADOOP_SWIFT_TRUST_ID] = proxy_configs.get( 'proxy_trust_id') configs[sw.HADOOP_SWIFT_DOMAIN_NAME] = CONF.proxy_user_domain_name return configs return configs def get_workflow_xml(self, cluster, job_configs, *args, **kwargs): proxy_configs = job_configs.get('proxy_configs') job_dict = {'configs': self.get_configs(proxy_configs=proxy_configs), 'args': []} self.update_job_dict(job_dict, job_configs) main_class, java_opts, args = self._get_java_configs(job_dict) creator = java_workflow.JavaWorkflowCreator() creator.build_workflow_xml(main_class, configuration=job_dict['configs'], java_opts=java_opts, arguments=args) return creator.get_built_workflow_xml() class ShellFactory(BaseFactory): def __init__(self, job): self.name, self.file_names = self.get_file_names(job) def get_file_names(self, job): ctx = context.ctx() return (conductor.job_main_name(ctx, job), conductor.job_lib_names(ctx, job)) def get_configs(self): return {'configs': {}, 'params': {}, 'args': []} def get_workflow_xml(self, cluster, job_configs, *args, **kwargs): job_dict = self.get_configs() self.update_job_dict(job_dict, job_configs) creator = shell_workflow.ShellWorkflowCreator() creator.build_workflow_xml(self.name, configuration=job_dict['configs'], env_vars=job_dict['params'], arguments=job_dict['args'], files=self.file_names) return creator.get_built_workflow_xml() def _get_creator(job): def make_PigFactory(): return PigFactory(job) def make_HiveFactory(): return HiveFactory(job) def make_ShellFactory(): return ShellFactory(job) type_map = { edp.JOB_TYPE_HIVE: make_HiveFactory, 
edp.JOB_TYPE_JAVA: JavaFactory,
        edp.JOB_TYPE_MAPREDUCE: MapReduceFactory,
        edp.JOB_TYPE_MAPREDUCE_STREAMING: MapReduceFactory,
        edp.JOB_TYPE_PIG: make_PigFactory,
        edp.JOB_TYPE_SHELL: make_ShellFactory
    }
    return type_map[job.type]()


def get_workflow_xml(job, cluster, job_configs, *args, **kwargs):
    return _get_creator(job).get_workflow_xml(
        cluster, job_configs, *args, **kwargs)


def get_possible_job_config(job_type):
    if not edp.compare_job_type(job_type, *edp.JOB_TYPES_ALL):
        return None

    if edp.compare_job_type(job_type, edp.JOB_TYPE_JAVA):
        return {'job_config': {'configs': [], 'args': []}}

    if edp.compare_job_type(job_type, edp.JOB_TYPE_SHELL):
        return {'job_config': {'configs': [], 'params': {}, 'args': []}}

    if edp.compare_job_type(job_type,
                            edp.JOB_TYPE_MAPREDUCE, edp.JOB_TYPE_PIG):
        cfg = xmlutils.load_hadoop_xml_defaults(
            'service/edp/resources/mapred-default.xml')
        if edp.compare_job_type(job_type, edp.JOB_TYPE_MAPREDUCE):
            cfg += get_possible_mapreduce_configs()
    elif edp.compare_job_type(job_type, edp.JOB_TYPE_HIVE):
        cfg = xmlutils.load_hadoop_xml_defaults(
            'service/edp/resources/hive-default.xml')

    config = {'configs': cfg}
    if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG, edp.JOB_TYPE_HIVE):
        config.update({'params': {}})
    if edp.compare_job_type(job_type, edp.JOB_TYPE_PIG):
        config.update({'args': []})
    return {'job_config': config}


def get_possible_mapreduce_configs():
    '''return a list of possible configuration values for map reduce jobs.'''
    cfg = xmlutils.load_hadoop_xml_defaults(
        'service/edp/resources/mapred-job-config.xml')
    return cfg
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641486.7418911 sahara-16.0.0/sahara/service/edp/resources/0000775000175000017500000000000000000000000020541 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/resources/edp-main-wrapper.jar0000664000175000017500000001172300000000000024413 0ustar00zuulzuul00000000000000
[binary content of edp-main-wrapper.jar and edp-spark-wrapper.jar omitted; readable text resumes in sahara-16.0.0/sahara/service/edp/resources/hive-default.xml]
mapred.reduce.tasks -1 The default number of reduce tasks per job. Typically set to a prime close to the number of available hosts. Ignored when mapred.job.tracker is "local". Hadoop set this to 1 by default, whereas hive uses -1 as its default value. By setting this property to -1, Hive will automatically figure out what should be the number of reducers.
hive.exec.reducers.bytes.per.reducer 1000000000 size per reducer. The default is 1G, i.e if the input size is 10G, it will use 10 reducers.
hive.exec.reducers.max 999 max number of reducers will be used. If the one specified in the configuration parameter mapred.reduce.tasks is negative, hive will use this one as the max number of reducers when automatically determine number of reducers.
hive.cli.print.header false Whether to print the names of the columns in query output.
hive.cli.print.current.db false Whether to include the current database in the hive prompt.
hive.cli.prompt hive Command line prompt configuration value. Other hiveconf can be used in this configuration value. Variable substitution will only be invoked at the hive cli startup.
hive.cli.pretty.output.num.cols -1 The number of columns to use when formatting output generated by the DESCRIBE PRETTY table_name command. If the value of this property is -1, then hive will use the auto-detected terminal width.
hive.exec.scratchdir /tmp/hive-${user.name} Scratch space for Hive jobs
hive.exec.local.scratchdir /tmp/${user.name} Local scratch space for Hive jobs
hive.test.mode false whether hive is running in test mode. If yes, it turns on sampling and prefixes the output tablename
hive.test.mode.prefix test_ if hive is running in test mode, prefixes the output table by this string
hive.test.mode.samplefreq 32 if hive is running in test mode and table is not bucketed, sampling frequency
hive.test.mode.nosamplelist if hive is running in test mode, dont sample the above comma seperated list of tables
hive.metastore.uris Thrift uri for the remote metastore. Used by metastore client to connect to remote metastore.
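A small worked example may help here (an illustrative sketch, not part of hive-default.xml): with mapred.reduce.tasks left at -1, Hive derives the reducer count from the two settings above; the 1000000000 and 999 values are the documented defaults, and the input size below is made up.
import math

def estimated_reducers(input_bytes,
                       bytes_per_reducer=1000000000,   # hive.exec.reducers.bytes.per.reducer
                       max_reducers=999):              # hive.exec.reducers.max
    """Approximate the reducer count Hive picks automatically."""
    return min(max_reducers, max(1, math.ceil(input_bytes / bytes_per_reducer)))

print(estimated_reducers(10 * 1000 ** 3))   # 10 GB input with 1 GB per reducer -> 10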
javax.jdo.option.ConnectionURL jdbc:derby:;databaseName=metastore_db;create=true JDBC connect string for a JDBC metastore javax.jdo.option.ConnectionDriverName org.apache.derby.jdbc.EmbeddedDriver Driver class name for a JDBC metastore javax.jdo.PersistenceManagerFactoryClass org.datanucleus.jdo.JDOPersistenceManagerFactory class implementing the jdo persistence javax.jdo.option.DetachAllOnCommit true detaches all objects from session so that they can be used after transaction is committed javax.jdo.option.NonTransactionalRead true reads outside of transactions javax.jdo.option.ConnectionUserName APP username to use against metastore database javax.jdo.option.ConnectionPassword mine password to use against metastore database javax.jdo.option.Multithreaded true Set this to true if multiple threads access metastore through JDO concurrently. datanucleus.connectionPoolingType DBCP Uses a DBCP connection pool for JDBC metastore datanucleus.validateTables false validates existing schema against code. turn this on if you want to verify existing schema datanucleus.validateColumns false validates existing schema against code. turn this on if you want to verify existing schema datanucleus.validateConstraints false validates existing schema against code. turn this on if you want to verify existing schema datanucleus.storeManagerType rdbms metadata store type datanucleus.autoCreateSchema true creates necessary schema on a startup if one doesn't exist. set this to false, after creating it once datanucleus.autoStartMechanismMode checked throw exception if metadata tables are incorrect datanucleus.transactionIsolation read-committed Default transaction isolation level for identity generation. datanucleus.cache.level2 false Use a level 2 cache. Turn this off if metadata is changed independently of hive metastore server datanucleus.cache.level2.type SOFT SOFT=soft reference based cache, WEAK=weak reference based cache. datanucleus.identifierFactory datanucleus Name of the identifier factory to use when generating table/column names etc. 'datanucleus' is used for backward compatibility datanucleus.plugin.pluginRegistryBundleCheck LOG Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE] hive.metastore.warehouse.dir /user/hive/warehouse location of default database for the warehouse hive.metastore.execute.setugi false In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that its best effort. If client sets its to true and server sets it to false, client setting will be ignored. hive.metastore.event.listeners list of comma seperated listeners for metastore events. hive.metastore.partition.inherit.table.properties list of comma seperated keys occurring in table properties which will get inherited to newly created partitions. * implies all the keys will get inherited. hive.metadata.export.location When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, it is the location to which the metadata will be exported. The default is an empty string, which results in the metadata being exported to the current user's home directory on HDFS. 
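As an illustrative sketch (not part of hive-default.xml), the JDO settings above are the ones a deployment would override to point the metastore at an external database instead of the embedded Derby store; the MySQL URL, driver and credentials below are made-up examples.
metastore_overrides = {
    'javax.jdo.option.ConnectionURL': 'jdbc:mysql://db.example.org/metastore_db',
    'javax.jdo.option.ConnectionDriverName': 'com.mysql.jdbc.Driver',
    'javax.jdo.option.ConnectionUserName': 'hive',
    'javax.jdo.option.ConnectionPassword': 'secret',
}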
hive.metadata.move.exported.metadata.to.trash When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, this setting determines if the metadata that is exported will subsequently be moved to the user's trash directory alongside the dropped table data. This ensures that the metadata will be cleaned up along with the dropped table data. hive.metastore.partition.name.whitelist.pattern Partition names will be checked against this regex pattern and rejected if not matched. hive.metastore.end.function.listeners list of comma separated listeners for the end of metastore functions. hive.metastore.event.expiry.duration 0 Duration after which events expire from events table (in seconds) hive.metastore.event.clean.freq 0 Frequency at which timer task runs to purge expired events in metastore(in seconds). hive.metastore.connect.retries 5 Number of retries while opening a connection to metastore hive.metastore.failure.retries 3 Number of retries upon failure of Thrift metastore calls hive.metastore.client.connect.retry.delay 1 Number of seconds for the client to wait between consecutive connection attempts hive.metastore.client.socket.timeout 20 MetaStore Client socket timeout in seconds hive.metastore.rawstore.impl org.apache.hadoop.hive.metastore.ObjectStore Name of the class that implements org.apache.hadoop.hive.metastore.rawstore interface. This class is used to store and retrieval of raw metadata objects such as table, database hive.metastore.batch.retrieve.max 300 Maximum number of objects (tables/partitions) can be retrieved from metastore in one batch. The higher the number, the less the number of round trips is needed to the Hive metastore server, but it may also cause higher memory requirement at the client side. hive.metastore.batch.retrieve.table.partition.max 1000 Maximum number of table partitions that metastore internally retrieves in one batch. hive.default.fileformat TextFile Default file format for CREATE TABLE statement. Options are TextFile and SequenceFile. Users can explicitly say CREATE TABLE ... STORED AS <TEXTFILE|SEQUENCEFILE> to override hive.fileformat.check true Whether to check file format or not when loading data files hive.map.aggr true Whether to use map-side aggregation in Hive Group By queries hive.groupby.skewindata false Whether there is skew in data to optimize group by queries hive.optimize.multigroupby.common.distincts true Whether to optimize a multi-groupby query with the same distinct. Consider a query like: from src insert overwrite table dest1 select col1, count(distinct colx) group by col1 insert overwrite table dest2 select col2, count(distinct colx) group by col2; With this parameter set to true, first we spray by the distinct value (colx), and then perform the 2 groups bys. This makes sense if map-side aggregation is turned off. However, with maps-side aggregation, it might be useful in some cases to treat the 2 inserts independently, thereby performing the query above in 2MR jobs instead of 3 (due to spraying by distinct key first). If this parameter is turned off, we dont consider the fact that the distinct key is the same across different MR jobs. 
hive.groupby.mapaggr.checkinterval 100000 Number of rows after which size of the grouping keys/aggregation classes is performed hive.mapred.local.mem 0 For local mode, memory of the mappers/reducers hive.mapjoin.followby.map.aggr.hash.percentmemory 0.3 Portion of total memory to be used by map-side grup aggregation hash table, when this group by is followed by map join hive.map.aggr.hash.force.flush.memory.threshold 0.9 The max memory to be used by map-side grup aggregation hash table, if the memory usage is higher than this number, force to flush data hive.map.aggr.hash.percentmemory 0.5 Portion of total memory to be used by map-side grup aggregation hash table hive.map.aggr.hash.min.reduction 0.5 Hash aggregation will be turned off if the ratio between hash table size and input rows is bigger than this number. Set to 1 to make sure hash aggregation is never turned off. hive.optimize.cp true Whether to enable column pruner hive.optimize.index.filter false Whether to enable automatic use of indexes hive.optimize.index.groupby false Whether to enable optimization of group-by queries using Aggregate indexes. hive.optimize.ppd true Whether to enable predicate pushdown hive.optimize.ppd.storage true Whether to push predicates down into storage handlers. Ignored when hive.optimize.ppd is false. hive.ppd.recognizetransivity true Whether to transitively replicate predicate filters over equijoin conditions. hive.optimize.groupby true Whether to enable the bucketed group by from bucketed partitions/tables. hive.optimize.skewjoin.compiletime false Whether to create a separate plan for skewed keys for the tables in the join. This is based on the skewed keys stored in the metadata. At compile time, the plan is broken into different joins: one for the skewed keys, and the other for the remaining keys. And then, a union is performed for the 2 joins generated above. So unless the same skewed key is present in both the joined tables, the join for the skewed key will be performed as a map-side join. The main difference between this paramater and hive.optimize.skewjoin is that this parameter uses the skew information stored in the metastore to optimize the plan at compile time itself. If there is no skew information in the metadata, this parameter will not have any affect. Both hive.optimize.skewjoin.compiletime and hive.optimize.skewjoin should be set to true. Ideally, hive.optimize.skewjoin should be renamed as hive.optimize.skewjoin.runtime, but not doing so for backward compatibility. If the skew information is correctly stored in the metadata, hive.optimize.skewjoin.compiletime would change the query plan to take care of it, and hive.optimize.skewjoin will be a no-op. hive.optimize.union.remove false Whether to remove the union and push the operators between union and the filesink above union. This avoids an extra scan of the output by union. This is independently useful for union queries, and specially useful when hive.optimize.skewjoin.compiletime is set to true, since an extra union is inserted. The merge is triggered if either of hive.merge.mapfiles or hive.merge.mapredfiles is set to true. If the user has set hive.merge.mapfiles to true and hive.merge.mapredfiles to false, the idea was the number of reducers are few, so the number of files anyway are small. However, with this optimization, we are increasing the number of files possibly by a big margin. So, we merge aggresively. 
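A short sketch (illustrative only, not part of hive-default.xml) of the cut-off described by hive.map.aggr.hash.min.reduction above; the row counts are made-up examples.
def keep_hash_aggregation(hash_table_entries, input_rows, min_reduction=0.5):
    """Map-side hash aggregation stays enabled only while it actually reduces rows."""
    return (hash_table_entries / input_rows) <= min_reduction

print(keep_hash_aggregation(30000, 100000))   # ratio 0.3 -> True, keep aggregating
print(keep_hash_aggregation(90000, 100000))   # ratio 0.9 -> False, hash aggregation is turned off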
hive.mapred.supports.subdirectories false Whether the version of hadoop which is running supports sub-directories for tables/partitions. Many hive optimizations can be applied if the hadoop version supports sub-directories for tables/partitions. It was added by MAPREDUCE-1501 hive.multigroupby.singlemr false Whether to optimize multi group by query to generate single M/R job plan. If the multi group by query has common group by keys, it will be optimized to generate single M/R job. hive.map.groupby.sorted false If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform the group by in the mapper by using BucketizedHiveInputFormat. The only downside to this is that it limits the number of mappers to the number of files. hive.map.groupby.sorted.testmode false If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform the group by in the mapper by using BucketizedHiveInputFormat. If the test mode is set, the plan is not converted, but a query property is set to denote the same. hive.new.job.grouping.set.cardinality 30 Whether a new map-reduce job should be launched for grouping sets/rollups/cubes. For a query like: select a, b, c, count(1) from T group by a, b, c with rollup; 4 rows are created per row: (a, b, c), (a, b, null), (a, null, null), (null, null, null). This can lead to explosion across map-reduce boundary if the cardinality of T is very high, and map-side aggregation does not do a very good job. This parameter decides if hive should add an additional map-reduce job. If the grouping set cardinality (4 in the example above), is more than this value, a new MR job is added under the assumption that the orginal group by will reduce the data size. hive.join.emit.interval 1000 How many rows in the right-most join operand Hive should buffer before emitting the join result. hive.join.cache.size 25000 How many rows in the joining tables (except the streaming table) should be cached in memory. hive.mapjoin.bucket.cache.size 100 How many values in each keys in the map-joined table should be cached in memory. hive.mapjoin.cache.numrows 25000 How many rows should be cached by jdbm for map join. hive.optimize.skewjoin false Whether to enable skew join optimization. The algorithm is as follows: At runtime, detect the keys with a large skew. Instead of processing those keys, store them temporarily in a hdfs directory. In a follow-up map-reduce job, process those skewed keys. The same key need not be skewed for all the tables, and so, the follow-up map-reduce job (for the skewed keys) would be much faster, since it would be a map-join. hive.skewjoin.key 100000 Determine if we get a skew key in join. If we see more than the specified number of rows with the same key in join operator, we think the key as a skew join key. hive.skewjoin.mapjoin.map.tasks 10000 Determine the number of map task used in the follow up map join job for a skew join. It should be used together with hive.skewjoin.mapjoin.min.split to perform a fine grained control. hive.skewjoin.mapjoin.min.split 33554432 Determine the number of map task at most used in the follow up map join job for a skew join by specifying the minimum split size. It should be used together with hive.skewjoin.mapjoin.map.tasks to perform a fine grained control. hive.mapred.mode nonstrict The mode in which the hive operations are being performed. In strict mode, some risky queries are not allowed to run. They include: Cartesian Product. No partition being picked up for a query. 
Comparing bigints and strings. Comparing bigints and doubles. Orderby without limit. hive.enforce.bucketmapjoin false If the user asked for bucketed map-side join, and it cannot be performed, should the query fail or not ? For eg, if the buckets in the tables being joined are not a multiple of each other, bucketed map-side join cannot be performed, and the query will fail if hive.enforce.bucketmapjoin is set to true. hive.exec.script.maxerrsize 100000 Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task). This prevents runaway scripts from filling logs partitions to capacity hive.exec.script.allow.partial.consumption false When enabled, this option allows a user script to exit successfully without consuming all the data from the standard input. hive.script.operator.id.env.var HIVE_SCRIPT_OPERATOR_ID Name of the environment variable that holds the unique script operator ID in the user's transform function (the custom mapper/reducer that the user has specified in the query) hive.script.operator.truncate.env false Truncate each environment variable for external script in scripts operator to 20KB (to fit system limits) hive.exec.compress.output false This controls whether the final outputs of a query (to a local/hdfs file or a hive table) is compressed. The compression codec and other options are determined from hadoop config variables mapred.output.compress* hive.exec.compress.intermediate false This controls whether intermediate files produced by hive between multiple map-reduce jobs are compressed. The compression codec and other options are determined from hadoop config variables mapred.output.compress* hive.exec.parallel false Whether to execute jobs in parallel hive.exec.parallel.thread.number 8 How many jobs at most can be executed in parallel hive.exec.rowoffset false Whether to provide the row offset virtual column hive.task.progress false Whether Hive should periodically update task progress counters during execution. Enabling this allows task progress to be monitored more closely in the job tracker, but may impose a performance penalty. This flag is automatically set to true for jobs with hive.exec.dynamic.partition set to true. hive.hwi.war.file lib/hive-hwi-0.11.0.war This sets the path to the HWI war file, relative to ${HIVE_HOME}. hive.hwi.listen.host 0.0.0.0 This is the host address the Hive Web Interface will listen on hive.hwi.listen.port 9999 This is the port the Hive Web Interface will listen on hive.exec.pre.hooks Comma-separated list of pre-execution hooks to be invoked for each statement. A pre-execution hook is specified as the name of a Java class which implements the org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface. hive.exec.post.hooks Comma-separated list of post-execution hooks to be invoked for each statement. A post-execution hook is specified as the name of a Java class which implements the org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface. hive.exec.failure.hooks Comma-separated list of on-failure hooks to be invoked for each statement. An on-failure hook is specified as the name of Java class which implements the org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface. hive.metastore.init.hooks A comma separated list of hooks to be invoked at the beginning of HMSHandler initialization. Aninit hook is specified as the name of Java class which extends org.apache.hadoop.hive.metastore.MetaStoreInitListener. 
hive.client.stats.publishers Comma-separated list of statistics publishers to be invoked on counters on each job. A client stats publisher is specified as the name of a Java class which implements the org.apache.hadoop.hive.ql.stats.ClientStatsPublisher interface. hive.client.stats.counters Subset of counters that should be of interest for hive.client.stats.publishers (when one wants to limit their publishing). Non-display names should be used hive.merge.mapfiles true Merge small files at the end of a map-only job hive.merge.mapredfiles false Merge small files at the end of a map-reduce job hive.heartbeat.interval 1000 Send a heartbeat after this interval - used by mapjoin and filter operators hive.merge.size.per.task 256000000 Size of merged files at the end of the job hive.merge.smallfiles.avgsize 16000000 When the average output file size of a job is less than this number, Hive will start an additional map-reduce job to merge the output files into bigger files. This is only done for map-only jobs if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true. hive.mapjoin.smalltable.filesize 25000000 The threshold for the input file size of the small tables; if the file size is smaller than this threshold, it will try to convert the common join into map join hive.ignore.mapjoin.hint true Ignore the mapjoin hint hive.mapjoin.localtask.max.memory.usage 0.90 This number means how much memory the local task can take to hold the key/value into in-memory hash table; If the local task's memory usage is more than this number, the local task will be abort by themself. It means the data of small table is too large to be hold in the memory. hive.mapjoin.followby.gby.localtask.max.memory.usage 0.55 This number means how much memory the local task can take to hold the key/value into in-memory hash table when this map join followed by a group by; If the local task's memory usage is more than this number, the local task will be abort by themself. It means the data of small table is too large to be hold in the memory. hive.mapjoin.check.memory.rows 100000 The number means after how many rows processed it needs to check the memory usage hive.auto.convert.join false Whether Hive enable the optimization about converting common join into mapjoin based on the input file size hive.auto.convert.join.noconditionaltask true Whether Hive enable the optimization about converting common join into mapjoin based on the input file size. If this paramater is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the specified size, the join is directly converted to a mapjoin (there is no conditional task). hive.auto.convert.join.noconditionaltask.size 10000000 If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. However, if it is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, the join is directly converted to a mapjoin(there is no conditional task). The default is 10MB hive.optimize.mapjoin.mapreduce false If hive.auto.convert.join is off, this parameter does not take affect. If it is on, and if there are map-join jobs followed by a map-reduce job (for e.g a group by), each map-only job is merged with the following map-reduce job. hive.script.auto.progress false Whether Hive Tranform/Map/Reduce Clause should automatically send progress information to TaskTracker to avoid the task getting killed because of inactivity. 
Hive sends progress information when the script is outputting to stderr. This option removes the need of periodically producing stderr messages, but users should be cautious because this may prevent infinite loops in the scripts to be killed by TaskTracker. hive.script.serde org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe The default serde for trasmitting input data to and reading output data from the user scripts. hive.binary.record.max.length 1000 Read from a binary stream and treat each hive.binary.record.max.length bytes as a record. The last record before the end of stream can have less than hive.binary.record.max.length bytes hive.script.recordreader org.apache.hadoop.hive.ql.exec.TextRecordReader The default record reader for reading data from the user scripts. hive.script.recordwriter org.apache.hadoop.hive.ql.exec.TextRecordWriter The default record writer for writing data to the user scripts. hive.input.format org.apache.hadoop.hive.ql.io.CombineHiveInputFormat The default input format. Set this to HiveInputFormat if you encounter problems with CombineHiveInputFormat. hive.udtf.auto.progress false Whether Hive should automatically send progress information to TaskTracker when using UDTF's to prevent the task getting killed because of inactivity. Users should be cautious because this may prevent TaskTracker from killing tasks with infinte loops. hive.mapred.reduce.tasks.speculative.execution true Whether speculative execution for reducers should be turned on. hive.exec.counters.pull.interval 1000 The interval with which to poll the JobTracker for the counters the running job. The smaller it is the more load there will be on the jobtracker, the higher it is the less granular the caught will be. hive.querylog.location /tmp/${user.name} Location of Hive run time structured log file hive.querylog.enable.plan.progress true Whether to log the plan's progress every time a job's progress is checked. These logs are written to the location specified by hive.querylog.location hive.querylog.plan.progress.interval 60000 The interval to wait between logging the plan's progress in milliseconds. If there is a whole number percentage change in the progress of the mappers or the reducers, the progress is logged regardless of this value. The actual interval will be the ceiling of (this value divided by the value of hive.exec.counters.pull.interval) multiplied by the value of hive.exec.counters.pull.interval I.e. if it is not divide evenly by the value of hive.exec.counters.pull.interval it will be logged less frequently than specified. This only has an effect if hive.querylog.enable.plan.progress is set to true. hive.enforce.bucketing false Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced. hive.enforce.sorting false Whether sorting is enforced. If true, while inserting into the table, sorting is enforced. hive.optimize.bucketingsorting true If hive.enforce.bucketing or hive.enforce.sorting is true, dont create a reducer for enforcing bucketing/sorting for queries of the form: insert overwrite table T2 select * from T1; where T1 and T2 are bucketed/sorted by the same keys into the same number of buckets. hive.enforce.sortmergebucketmapjoin false If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not ? hive.auto.convert.sortmerge.join false Will the join be automatically converted to a sort-merge join, if the joined tables pass the criteria for sort-merge join. 
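The interaction between hive.querylog.plan.progress.interval and hive.exec.counters.pull.interval described above can be made concrete with a small sketch (illustrative only; the 60000 and 1000 values are the documented defaults).
import math

def effective_plan_log_interval(plan_progress_ms=60000, counters_pull_ms=1000):
    """Actual logging interval: ceil(plan_progress / counters_pull) * counters_pull."""
    return math.ceil(plan_progress_ms / counters_pull_ms) * counters_pull_ms

print(effective_plan_log_interval())            # 60000 ms with the defaults
print(effective_plan_log_interval(2500, 1000))  # rounds up to 3000 ms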
hive.auto.convert.sortmerge.join.bigtable.selection.policy org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ The policy to choose the big table for automatic conversion to sort-merge join. By default, the table with the largest partitions is assigned the big table. All policies are: . based on position of the table - the leftmost table is selected org.apache.hadoop.hive.ql.optimizer.LeftmostBigTableSMJ. . based on total size (all the partitions selected in the query) of the table org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ. . based on average size (all the partitions selected in the query) of the table org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ. New policies can be added in future. hive.metastore.ds.connection.url.hook Name of the hook to use for retriving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used hive.metastore.ds.retry.attempts 1 The number of times to retry a metastore call if there were a connection error hive.metastore.ds.retry.interval 1000 The number of miliseconds between metastore retry attempts hive.metastore.server.min.threads 200 Minimum number of worker threads in the Thrift server's pool. hive.metastore.server.max.threads 100000 Maximum number of worker threads in the Thrift server's pool. hive.metastore.server.tcp.keepalive true Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections. hive.metastore.sasl.enabled false If true, the metastore thrift interface will be secured with SASL. Clients must authenticate with Kerberos. hive.metastore.thrift.framed.transport.enabled false If true, the metastore thrift interface will use TFramedTransport. When false (default) a standard TTransport is used. hive.metastore.kerberos.keytab.file The path to the Kerberos Keytab file containing the metastore thrift server's service principal. hive.metastore.kerberos.principal hive-metastore/_HOST@EXAMPLE.COM The service principal for the metastore thrift server. The special string _HOST will be replaced automatically with the correct host name. hive.cluster.delegation.token.store.class org.apache.hadoop.hive.thrift.MemoryTokenStore The delegation token store implementation. Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster. hive.cluster.delegation.token.store.zookeeper.connectString localhost:2181 The ZooKeeper token store connect string. hive.cluster.delegation.token.store.zookeeper.znode /hive/cluster/delegation The root path for token store data. hive.cluster.delegation.token.store.zookeeper.acl sasl:hive/host1@EXAMPLE.COM:cdrwa,sasl:hive/host2@EXAMPLE.COM:cdrwa ACL for token store entries. List comma separated all server principals for the cluster. hive.metastore.cache.pinobjtypes Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order List of comma separated metastore object types that should be pinned in the cache hive.optimize.reducededuplication true Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again. This should always be set to true. Since it is a new feature, it has been made configurable. hive.optimize.reducededuplication.min.reducer 4 Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS. That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR. 
The optimization will be disabled if the number of reducers is less than the specified value. hive.exec.dynamic.partition true Whether or not to allow dynamic partitions in DML/DDL. hive.exec.dynamic.partition.mode strict In strict mode, the user must specify at least one static partition in case the user accidentally overwrites all partitions. hive.exec.max.dynamic.partitions 1000 Maximum number of dynamic partitions allowed to be created in total. hive.exec.max.dynamic.partitions.pernode 100 Maximum number of dynamic partitions allowed to be created in each mapper/reducer node. hive.exec.max.created.files 100000 Maximum number of HDFS files created by all mappers/reducers in a MapReduce job. hive.exec.default.partition.name __HIVE_DEFAULT_PARTITION__ The default partition name in case the dynamic partition column value is null/empty string or any other value that cannot be escaped. This value must not contain any special character used in HDFS URI (e.g., ':', '%', '/' etc). The user has to be aware that the dynamic partition value should not contain this value to avoid confusion. hive.stats.dbclass jdbc:derby The default database that stores temporary hive statistics. hive.stats.autogather true A flag to gather statistics automatically during the INSERT OVERWRITE command. hive.stats.jdbcdriver org.apache.derby.jdbc.EmbeddedDriver The JDBC driver for the database that stores temporary hive statistics. hive.stats.dbconnectionstring jdbc:derby:;databaseName=TempStatsStore;create=true The default connection string for the database that stores temporary hive statistics. hive.stats.default.publisher The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is not JDBC or HBase. hive.stats.default.aggregator The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is not JDBC or HBase. hive.stats.jdbc.timeout 30 Timeout value (number of seconds) used by JDBC connection and statements. hive.stats.retries.max 0 Maximum number of retries when the stats publisher/aggregator gets an exception while updating the intermediate database. The default is no retries on failure. hive.stats.retries.wait 3000 The base waiting window (in milliseconds) before the next retry. The actual wait time is calculated as baseWindow * failures + baseWindow * (failures + 1) * (random number between [0.0, 1.0]). hive.stats.reliable false Whether queries will fail because stats cannot be collected completely accurately. If this is set to true, reading/writing from/into a partition may fail because the stats could not be computed accurately. hive.stats.collect.tablekeys false Whether join and group by keys on tables are derived and maintained in the QueryPlan. This is useful to identify how tables are accessed and to determine if they should be bucketed. hive.stats.collect.scancols false Whether column accesses are tracked in the QueryPlan. This is useful to identify how tables are accessed and to determine if there are wasted columns that can be trimmed. hive.stats.ndv.error 20.0 Standard error expressed in percentage. Provides a tradeoff between accuracy and compute cost. A lower value for error indicates higher accuracy and a higher compute cost. hive.stats.key.prefix.max.length 200 Determines whether, when the prefix of the key used for intermediate stats collection exceeds a certain length, a hash of the key is used instead. If the value is < 0 then hashing is never used; if the value is >= 0 then hashing is used only when the key prefix's length exceeds that value.
The key prefix is defined as everything preceding the task ID in the key. hive.support.concurrency false Whether hive supports concurrency or not. A zookeeper instance must be up and running for the default hive lock manager to support read-write locks. hive.lock.numretries 100 The number of times you want to try to get all the locks hive.unlock.numretries 10 The number of times you want to retry to do one unlock hive.lock.sleep.between.retries 60 The sleep time (in seconds) between various retries hive.zookeeper.quorum The list of zookeeper servers to talk to. This is only needed for read/write locks. hive.zookeeper.client.port 2181 The port of zookeeper servers to talk to. This is only needed for read/write locks. hive.zookeeper.session.timeout 600000 Zookeeper client's session timeout. The client is disconnected, and as a result, all locks released, if a heartbeat is not sent in the timeout. hive.zookeeper.namespace hive_zookeeper_namespace The parent node under which all zookeeper nodes are created. hive.zookeeper.clean.extra.nodes false Clean extra nodes at the end of the session. fs.har.impl org.apache.hadoop.hive.shims.HiveHarFileSystem The implementation for accessing Hadoop Archives. Note that this won't be applicable to Hadoop vers less than 0.20 hive.archive.enabled false Whether archiving operations are permitted hive.fetch.output.serde org.apache.hadoop.hive.serde2.DelimitedJSONSerDe The serde used by FetchTask to serialize the fetch output. hive.exec.mode.local.auto false Let hive determine whether to run in local mode automatically hive.exec.drop.ignorenonexistent true Do not report an error if DROP TABLE/VIEW specifies a non-existent table/view hive.exec.show.job.failure.debug.info true If a job fails, whether to provide a link in the CLI to the task with the most failures, along with debugging hints if applicable. hive.auto.progress.timeout 0 How long to run autoprogressor for the script/UDTF operators (in seconds). Set to 0 for forever. hive.hbase.wal.enabled true Whether writes to HBase should be forced to the write-ahead log. Disabling this improves HBase write performance at the risk of lost writes in case of a crash. hive.table.parameters.default Default property values for newly created tables hive.entity.separator @ Separator used to construct names of tables and partitions. For example, dbname@tablename@partitionname hive.ddl.createtablelike.properties.whitelist Table Properties to copy over when executing a Create Table Like. hive.variable.substitute true This enables substitution using syntax like ${var} ${system:var} and ${env:var}. hive.variable.substitute.depth 40 The maximum replacements the substitution engine will do. hive.conf.validation true Eables type checking for registered hive configurations hive.security.authorization.enabled false enable or disable the hive client authorization hive.security.authorization.manager org.apache.hadoop.hive.ql.security.authorization.DefaultHiveAuthorizationProvider the hive client authorization manager class name. The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider. hive.security.metastore.authorization.manager org.apache.hadoop.hive.ql.security.authorization.DefaultHiveMetastoreAuthorizationProvider authorization manager class name to be used in the metastore for authorization. The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider. 
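The ${var} / ${system:var} / ${env:var} substitution and the depth limit described by hive.variable.substitute and hive.variable.substitute.depth above can be pictured with a simplified substitution loop. This is only a sketch of the idea, not Hive's implementation (it ignores the system: namespace):

import os
import re

def substitute(text, variables, max_depth=40):
    # Expand ${name} and ${env:name} references, giving up after max_depth
    # rounds in the same spirit as hive.variable.substitute.depth.
    pattern = re.compile(r"\$\{(env:)?([A-Za-z0-9_.]+)\}")

    def repl(match):
        if match.group(1):  # ${env:NAME} is taken from the process environment
            return os.environ.get(match.group(2), match.group(0))
        return variables.get(match.group(2), match.group(0))

    for _ in range(max_depth):
        new_text = pattern.sub(repl, text)
        if new_text == text:
            break
        text = new_text
    return text

print(substitute("select * from ${tbl}", {"tbl": "sales_${year}", "year": "2024"}))
# -> select * from sales_2024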
hive.security.authenticator.manager org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator hive client authenticator manager class name. The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider. hive.security.metastore.authenticator.manager org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator authenticator manager class name to be used in the metastore for authentication. The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider. hive.security.authorization.createtable.user.grants the privileges automatically granted to some users whenever a table gets created. An example like "userX,userY:select;userZ:create" will grant select privilege to userX and userY, and grant create privilege to userZ whenever a new table created. hive.security.authorization.createtable.group.grants the privileges automatically granted to some groups whenever a table gets created. An example like "groupX,groupY:select;groupZ:create" will grant select privilege to groupX and groupY, and grant create privilege to groupZ whenever a new table created. hive.security.authorization.createtable.role.grants the privileges automatically granted to some roles whenever a table gets created. An example like "roleX,roleY:select;roleZ:create" will grant select privilege to roleX and roleY, and grant create privilege to roleZ whenever a new table created. hive.security.authorization.createtable.owner.grants the privileges automatically granted to the owner whenever a table gets created. An example like "select,drop" will grant select and drop privilege to the owner of the table hive.metastore.authorization.storage.checks false Should the metastore do authorization checks against the underlying storage for operations like drop-partition (disallow the drop-partition if the user in question doesn't have permissions to delete the corresponding directory on the storage). hive.error.on.empty.partition false Whether to throw an excpetion if dynamic partition insert generates empty results. hive.index.compact.file.ignore.hdfs false True the hdfs location stored in the index file will be igbored at runtime. If the data got moved or the name of the cluster got changed, the index data should still be usable. hive.optimize.index.filter.compact.minsize 5368709120 Minimum size (in bytes) of the inputs on which a compact index is automatically used. hive.optimize.index.filter.compact.maxsize -1 Maximum size (in bytes) of the inputs on which a compact index is automatically used. A negative number is equivalent to infinity. hive.index.compact.query.max.size 10737418240 The maximum number of bytes that a query using the compact index can read. Negative value is equivalent to infinity. hive.index.compact.query.max.entries 10000000 The maximum number of index entries to read during a query that uses the compact index. Negative value is equivalent to infinity. hive.index.compact.binary.search true Whether or not to use a binary search to find the entries in an index table that match the filter, where possible hive.exim.uri.scheme.whitelist hdfs,pfile A comma separated list of acceptable URI schemes for import and export. hive.lock.mapred.only.operation false This param is to control whether or not only do lock on queries that need to execute at least one mapred job. 
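The grant-list syntax used by the hive.security.authorization.createtable.*.grants properties above ("userX,userY:select;userZ:create") maps principals to privileges. The following is a hedged sketch of a parser for that format, not code from Hive or Sahara:

def parse_grant_list(value):
    # "userX,userY:select;userZ:create" ->
    # {'userX': ['select'], 'userY': ['select'], 'userZ': ['create']}
    grants = {}
    for entry in filter(None, value.split(";")):
        principals, _, privileges = entry.partition(":")
        for principal in principals.split(","):
            grants.setdefault(principal.strip(), []).extend(
                p.strip() for p in privileges.split(",") if p.strip())
    return grants

print(parse_grant_list("userX,userY:select;userZ:create"))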
hive.limit.row.max.size 100000 When trying a smaller subset of data for simple LIMIT, how much size we need to guarantee each row to have at least. hive.limit.optimize.limit.file 10 When trying a smaller subset of data for simple LIMIT, maximum number of files we can sample. hive.limit.optimize.enable false Whether to enable to optimization to trying a smaller subset of data for simple LIMIT first. hive.limit.optimize.fetch.max 50000 Maximum number of rows allowed for a smaller subset of data for simple LIMIT, if it is a fetch query. Insert queries are not restricted by this limit. hive.rework.mapredwork false should rework the mapred work or not. This is first introduced by SymlinkTextInputFormat to replace symlink files with real paths at compile time. hive.exec.concatenate.check.index true If this sets to true, hive will throw error when doing 'alter table tbl_name [partSpec] concatenate' on a table/partition that has indexes on it. The reason the user want to set this to true is because it can help user to avoid handling all index drop, recreation, rebuild work. This is very helpful for tables with thousands of partitions. hive.sample.seednumber 0 A number used to percentage sampling. By changing this number, user will change the subsets of data sampled. hive.io.exception.handlers A list of io exception handler class names. This is used to construct a list exception handlers to handle exceptions thrown by record readers hive.autogen.columnalias.prefix.label _c String used as a prefix when auto generating column alias. By default the prefix label will be appended with a column position number to form the column alias. Auto generation would happen if an aggregate function is used in a select clause without an explicit alias. hive.autogen.columnalias.prefix.includefuncname false Whether to include function name in the column alias auto generated by hive. hive.exec.perf.logger org.apache.hadoop.hive.ql.log.PerfLogger The class responsible logging client side performance metrics. Must be a subclass of org.apache.hadoop.hive.ql.log.PerfLogger hive.start.cleanup.scratchdir false To cleanup the hive scratchdir while starting the hive server hive.output.file.extension String used as a file extension for output files. If not set, defaults to the codec extension for text files (e.g. ".gz"), or no extension otherwise. hive.insert.into.multilevel.dirs false Where to insert into multilevel directories like "insert directory '/HIVEFT25686/chinna/' from table" hive.warehouse.subdir.inherit.perms false Set this to true if the the table directories should inherit the permission of the warehouse or database directory instead of being created with the permissions derived from dfs umask hive.exec.job.debug.capture.stacktraces true Whether or not stack traces parsed from the task logs of a sampled failed task for each failed job should be stored in the SessionState hive.exec.driver.run.hooks A comma separated list of hooks which implement HiveDriverRunHook and will be run at the beginning and end of Driver.run, these will be run in the order specified hive.ddl.output.format text The data format to use for DDL output. One of "text" (for human readable text) or "json" (for a json object). hive.transform.escape.input false This adds an option to escape special chars (newlines, carriage returns and tabs) when they are passed to the user script. This is useful if the hive tables can contain data that contains special characters. 
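As a small illustration of the hive.autogen.columnalias.prefix.label behaviour described above, an auto-generated alias is just the prefix followed by the column position; the zero-based numbering in this sketch is an assumption for illustration:

def autogen_alias(position, prefix="_c"):
    # prefix + column position, e.g. the second unaliased expression becomes "_c1"
    return "%s%d" % (prefix, position)

print([autogen_alias(i) for i in range(3)])  # ['_c0', '_c1', '_c2']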
hive.exec.rcfile.use.explicit.header true If this is set the header for RC Files will simply be RCF. If this is not set the header will be that borrowed from sequence files, e.g. SEQ- followed by the input and output RC File formats. hive.multi.insert.move.tasks.share.dependencies false If this is set all move tasks for tables/partitions (not directories) at the end of a multi-insert query will only begin once the dependencies for all these move tasks have been met. Advantages: If concurrency is enabled, the locks will only be released once the query has finished, so with this config enabled, the time when the table/partition is generated will be much closer to when the lock on it is released. Disadvantages: If concurrency is not enabled, with this disabled, the tables/partitions which are produced by this query and finish earlier will be available for querying much earlier. Since the locks are only released once the query finishes, this does not apply if concurrency is enabled. hive.fetch.task.conversion minimal Some select queries can be converted to single FETCH task minimizing latency. Currently the query should be single sourced not having any subquery and should not have any aggregations or distincts (which incurrs RS), lateral views and joins. 1. minimal : SELECT STAR, FILTER on partition columns, LIMIT only 2. more : SELECT, FILTER, LIMIT only (TABLESAMPLE, virtual columns) hive.hmshandler.retry.attempts 1 The number of times to retry a HMSHandler call if there were a connection error hive.hmshandler.retry.interval 1000 The number of miliseconds between HMSHandler retry attempts hive.server.read.socket.timeout 10 Timeout for the HiveServer to close the connection if no response from the client in N seconds, defaults to 10 seconds. hive.server.tcp.keepalive true Whether to enable TCP keepalive for the Hive server. Keepalive will prevent accumulation of half-open connections. hive.decode.partition.name false Whether to show the unquoted partition names in query results. hive.log4j.file Hive log4j configuration file. If the property is not set, then logging will be initialized using hive-log4j.properties found on the classpath. If the property is set, the value must be a valid URI (java.net.URI, e.g. "file:///tmp/my-logging.properties"), which you can then extract a URL from and pass to PropertyConfigurator.configure(URL). hive.exec.log4j.file Hive log4j configuration file for execution mode(sub command). If the property is not set, then logging will be initialized using hive-exec-log4j.properties found on the classpath. If the property is set, the value must be a valid URI (java.net.URI, e.g. "file:///tmp/my-logging.properties"), which you can then extract a URL from and pass to PropertyConfigurator.configure(URL). hive.exec.infer.bucket.sort false If this is set, when writing partitions, the metadata will include the bucketing/sorting properties with which the data was written if any (this will not overwrite the metadata inherited from the table if the table is bucketed/sorted) hive.exec.infer.bucket.sort.num.buckets.power.two false If this is set, when setting the number of reducers for the map reduce task which writes the final output files, it will choose a number which is a power of two, unless the user specifies the number of reducers to use using mapred.reduce.tasks. The number of reducers may be set to a power of two, only to be followed by a merge task meaning preventing anything from being inferred. 
With hive.exec.infer.bucket.sort set to true: Advantages: If this is not set, the number of buckets for partitions will seem arbitrary, which means that the number of mappers used for optimized joins, for example, will be very low. With this set, since the number of buckets used for any partition is a power of two, the number of mappers used for optimized joins will be the least number of buckets used by any partition being joined. Disadvantages: This may mean a much larger or much smaller number of reducers being used in the final map reduce job, e.g. if a job was originally going to take 257 reducers, it will now take 512 reducers, similarly if the max number of reducers is 511, and a job was going to use this many, it will now use 256 reducers. hive.groupby.orderby.position.alias false Whether to enable using Column Position Alias in Group By or Order By hive.server2.thrift.min.worker.threads 5 Minimum number of Thrift worker threads hive.server2.thrift.max.worker.threads 100 Maximum number of Thrift worker threads hive.server2.thrift.port 10000 Port number of HiveServer2 Thrift interface. Can be overridden by setting $HIVE_SERVER2_THRIFT_PORT hive.server2.thrift.bind.host localhost Bind host on which to run the HiveServer2 Thrift interface. Can be overridden by setting $HIVE_SERVER2_THRIFT_BIND_HOST hive.server2.authentication NONE Client authentication types. NONE: no authentication check LDAP: LDAP/AD based authentication KERBEROS: Kerberos/GSSAPI authentication CUSTOM: Custom authentication provider (Use with property hive.server2.custom.authentication.class) hive.server2.custom.authentication.class Custom authentication class. Used when property 'hive.server2.authentication' is set to 'CUSTOM'. Provided class must be a proper implementation of the interface org.apache.hive.service.auth.PasswdAuthenticationProvider. HiveServer2 will call its Authenticate(user, passed) method to authenticate requests. The implementation may optionally extend the Hadoop's org.apache.hadoop.conf.Configured class to grab Hive's Configuration object. >hive.server2.authentication.kerberos.principal Kerberos server principal >hive.server2.authentication.kerberos.keytab Kerberos keytab file for server principal hive.server2.authentication.ldap.url LDAP connection URL hive.server2.authentication.ldap.baseDN LDAP base DN hive.server2.enable.doAs true Setting this property to true will have hive server2 execute hive operations as the user making the calls to it. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/resources/launch_command.py0000664000175000017500000000573500000000000024075 0ustar00zuulzuul00000000000000#!/usr/bin/env {{PYTHON_VERSION}} # Copyright (c) 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Script will be executed on cluster nodes, so oslo_log as requirement # Should be avoided. 
import logging
import signal
import subprocess  # nosec
import sys

log = logging.getLogger()
hdlr = logging.FileHandler(sys.argv[0] + ".log")
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
log.addHandler(hdlr)
log.setLevel(logging.DEBUG)


def make_handler(a):
    def handle_signal(signum, stack):
        a.send_signal(signum)
        log.info("Sent SIGINT to subprocess")
    return handle_signal


def parse_env_vars():
    index = 1
    env_vars = {}
    for var in sys.argv[1:]:
        # all environment parameters should be listed before the
        # executable, which is not supposed to contain the "=" sign
        # in the name
        kv_pair = var.split("=")
        if len(kv_pair) == 2:
            key, value = kv_pair
            env_vars[key.strip()] = value.strip()
            index += 1
        else:
            break
    return env_vars, sys.argv[index:]


log.info("Running %s" % ' '.join(sys.argv[1:]))

try:
    # "Unignore" SIGINT before the subprocess is launched
    # in case this process is running in the background
    # (background processes ignore SIGINT)
    signal.signal(signal.SIGINT, signal.SIG_DFL)

    # Separate between command including arguments and
    # environment variables
    env, args = parse_env_vars()

    # TODO(elmiko) this script should be evaluated to ensure that
    # arguments sent to the subprocess from sahara are valid in
    # some manner.
    # Interpret all command line args as the command to run
    a = subprocess.Popen(args,  # nosec
                         env=env,
                         stdout=open("stdout", "w"),
                         stderr=open("stderr", "w"))

    # Set our handler to trap SIGINT and propagate to the child.
    # The expectation is that the child process handles SIGINT
    # and exits.
    signal.signal(signal.SIGINT, make_handler(a))

    # Write out the childpid just in case there is a
    # need to send special signals directly to the child process
    open("childpid", "w").write("%s\n" % a.pid)

    # Wait for child to exit and write result file
    log.info("Waiting for subprocess %s" % a.pid)
    ret = a.wait()
    log.info("Subprocess exit status %s" % ret)
    open("result", "w").write("%s\n" % ret)
except Exception as e:
    log.exception(e)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0
sahara-16.0.0/sahara/service/edp/resources/mapred-default.xml0000664000175000017500000021053400000000000024162 0ustar00zuulzuul00000000000000
mapreduce.jobtracker.jobhistory.location If the job tracker is static, the history files are stored in this single well-known place. If no value is set here, by default it is in the local file system at ${hadoop.log.dir}/history. mapreduce.jobtracker.jobhistory.task.numberprogresssplits 12 Every task attempt progresses from 0.0 to 1.0 [unless it fails or is killed]. We record, for each task attempt, certain statistics over each twelfth of the progress range. You can change the number of intervals we divide the entire range of progress into by setting this property. Higher values give more precision to the recorded data, but cost more memory in the job tracker at runtime. Each increment in this attribute costs 16 bytes per running task. mapreduce.job.userhistorylocation User can specify a location to store the history files of a particular job. If nothing is specified, the logs are stored in the output directory. The files are stored in "_logs/history/" in the directory. User can stop logging by giving the value "none". mapreduce.jobtracker.jobhistory.completed.location The completed job history files are stored at this single well-known location. If nothing is specified, the files are stored at ${mapreduce.jobtracker.jobhistory.location}/done.
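Returning briefly to launch_command.py above: the script expects any KEY=value environment settings to appear before the command itself, and everything from the first non KEY=value argument onward is treated as the command to run. The snippet below re-applies that splitting rule to a made-up argument list (the job command and values are hypothetical, purely for illustration):

fake_argv = ["launch_command.py", "HADOOP_USER_NAME=hadoop", "JOB_ID=42",
             "bin/run-job.sh", "--input", "swift://container/data"]

env_vars, command = {}, []
for i, arg in enumerate(fake_argv[1:], start=1):
    key, sep, value = arg.partition("=")
    if not sep:  # first argument without "=" starts the command
        command = fake_argv[i:]
        break
    env_vars[key.strip()] = value.strip()

print(env_vars)  # {'HADOOP_USER_NAME': 'hadoop', 'JOB_ID': '42'}
print(command)   # ['bin/run-job.sh', '--input', 'swift://container/data']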
mapreduce.job.committer.setup.cleanup.needed true true, if job needs job-setup and job-cleanup. false, otherwise mapreduce.task.io.sort.factor 10 The number of streams to merge at once while sorting files. This determines the number of open file handles. mapreduce.task.io.sort.mb 100 The total amount of buffer memory to use while sorting files, in megabytes. By default, gives each merge stream 1MB, which should minimize seeks. mapreduce.map.sort.spill.percent 0.80 The soft limit in the serialization buffer. Once reached, a thread will begin to spill the contents to disk in the background. Note that collection will not block if this threshold is exceeded while a spill is already in progress, so spills may be larger than this threshold when it is set to less than .5 mapreduce.jobtracker.address local The host and port that the MapReduce job tracker runs at. If "local", then jobs are run in-process as a single map and reduce task. mapreduce.local.clientfactory.class.name org.apache.hadoop.mapred.LocalClientFactory This the client factory that is responsible for creating local job runner client mapreduce.jobtracker.http.address 0.0.0.0:50030 The job tracker http server address and port the server will listen on. If the port is 0 then the server will start on a free port. mapreduce.jobtracker.handler.count 10 The number of server threads for the JobTracker. This should be roughly 4% of the number of tasktracker nodes. mapreduce.tasktracker.report.address 127.0.0.1:0 The interface and port that task tracker server listens on. Since it is only connected to by the tasks, it uses the local interface. EXPERT ONLY. Should only be changed if your host does not have the loopback interface. mapreduce.cluster.local.dir ${hadoop.tmp.dir}/mapred/local The local directory where MapReduce stores intermediate data files. May be a comma-separated list of directories on different devices in order to spread disk i/o. Directories that do not exist are ignored. mapreduce.jobtracker.system.dir ${hadoop.tmp.dir}/mapred/system The directory where MapReduce stores control files. mapreduce.jobtracker.staging.root.dir ${hadoop.tmp.dir}/mapred/staging The root of the staging area for users' job files In practice, this should be the directory where users' home directories are located (usually /user) mapreduce.cluster.temp.dir ${hadoop.tmp.dir}/mapred/temp A shared directory for temporary files. mapreduce.tasktracker.local.dir.minspacestart 0 If the space in mapreduce.cluster.local.dir drops under this, do not ask for more tasks. Value in bytes. mapreduce.tasktracker.local.dir.minspacekill 0 If the space in mapreduce.cluster.local.dir drops under this, do not ask more tasks until all the current ones have finished and cleaned up. Also, to save the rest of the tasks we have running, kill one of them, to clean up some space. Start with the reduce tasks, then go with the ones that have finished the least. Value in bytes. mapreduce.jobtracker.expire.trackers.interval 600000 Expert: The time-interval, in miliseconds, after which a tasktracker is declared 'lost' if it doesn't send heartbeats. mapreduce.tasktracker.instrumentation org.apache.hadoop.mapred.TaskTrackerMetricsInst Expert: The instrumentation class to associate with each TaskTracker. mapreduce.tasktracker.resourcecalculatorplugin Name of the class whose instance will be used to query resource information on the tasktracker. The class must be an instance of org.apache.hadoop.util.ResourceCalculatorPlugin. 
If the value is null, the tasktracker attempts to use a class appropriate to the platform. Currently, the only platform supported is Linux. mapreduce.tasktracker.taskmemorymanager.monitoringinterval 5000 The interval, in milliseconds, for which the tasktracker waits between two cycles of monitoring its tasks' memory usage. Used only if tasks' memory management is enabled via mapred.tasktracker.tasks.maxmemory. mapreduce.tasktracker.tasks.sleeptimebeforesigkill 5000 The time, in milliseconds, the tasktracker waits for sending a SIGKILL to a task, after it has been sent a SIGTERM. This is currently not used on WINDOWS where tasks are just sent a SIGTERM. mapreduce.job.maps 2 The default number of map tasks per job. Ignored when mapreduce.jobtracker.address is "local". mapreduce.job.reduces 1 The default number of reduce tasks per job. Typically set to 99% of the cluster's reduce capacity, so that if a node fails the reduces can still be executed in a single wave. Ignored when mapreduce.jobtracker.address is "local". mapreduce.jobtracker.restart.recover false "true" to enable (job) recovery upon restart, "false" to start afresh mapreduce.jobtracker.jobhistory.block.size 3145728 The block size of the job history file. Since the job recovery uses job history, its important to dump job history to disk as soon as possible. Note that this is an expert level parameter. The default value is set to 3 MB. mapreduce.jobtracker.taskscheduler org.apache.hadoop.mapred.JobQueueTaskScheduler The class responsible for scheduling the tasks. mapreduce.job.reducer.preempt.delay.sec 0 The threshold in terms of seconds after which an unsatisfied mapper request triggers reducer preemption to free space. Default 0 implies that the reduces should be preempted immediately after allocation if there is currently no room for newly allocated mappers. mapreduce.job.max.split.locations 10 The max number of block locations to store for each split for locality calculation. mapreduce.job.split.metainfo.maxsize 10000000 The maximum permissible size of the split metainfo file. The JobTracker won't attempt to read split metainfo files bigger than the configured value. No limits if set to -1. mapreduce.jobtracker.taskscheduler.maxrunningtasks.perjob The maximum number of running tasks for a job before it gets preempted. No limits if undefined. mapreduce.map.maxattempts 4 Expert: The maximum number of attempts per map task. In other words, framework will try to execute a map task these many number of times before giving up on it. mapreduce.reduce.maxattempts 4 Expert: The maximum number of attempts per reduce task. In other words, framework will try to execute a reduce task these many number of times before giving up on it. mapreduce.reduce.shuffle.fetch.retry.enabled ${yarn.nodemanager.recovery.enabled} Set to enable fetch retry during host restart. mapreduce.reduce.shuffle.fetch.retry.interval-ms 1000 Time of interval that fetcher retry to fetch again when some non-fatal failure happens because of some events like NM restart. mapreduce.reduce.shuffle.fetch.retry.timeout-ms 30000 Timeout value for fetcher to retry to fetch again when some non-fatal failure happens because of some events like NM restart. mapreduce.reduce.shuffle.retry-delay.max.ms 60000 The maximum number of ms the reducer will delay before retrying to download map data. mapreduce.reduce.shuffle.parallelcopies 5 The default number of parallel transfers run by reduce during the copy(shuffle) phase. 
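To make the shuffle fetch retry settings above concrete: with the defaults quoted (an interval of 1000 ms and an overall timeout of 30000 ms), a fetcher retrying at the configured interval can make roughly thirty attempts before the timeout is exhausted. The arithmetic below is only an illustration, not Hadoop code, and assumes retries are evenly spaced:

def approx_fetch_retries(retry_timeout_ms=30000, retry_interval_ms=1000):
    # Rough upper bound on retry attempts before the fetch retry timeout expires.
    return retry_timeout_ms // retry_interval_ms

print(approx_fetch_retries())  # 30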
mapreduce.reduce.shuffle.connect.timeout 180000 Expert: The maximum amount of time (in milli seconds) reduce task spends in trying to connect to a tasktracker for getting map output. mapreduce.reduce.shuffle.read.timeout 180000 Expert: The maximum amount of time (in milli seconds) reduce task waits for map output data to be available for reading after obtaining connection. mapreduce.shuffle.connection-keep-alive.enable false set to true to support keep-alive connections. mapreduce.shuffle.connection-keep-alive.timeout 5 The number of seconds a shuffle client attempts to retain http connection. Refer "Keep-Alive: timeout=" header in Http specification mapreduce.task.timeout 600000 The number of milliseconds before a task will be terminated if it neither reads an input, writes an output, nor updates its status string. A value of 0 disables the timeout. mapreduce.tasktracker.map.tasks.maximum 2 The maximum number of map tasks that will be run simultaneously by a task tracker. mapreduce.tasktracker.reduce.tasks.maximum 2 The maximum number of reduce tasks that will be run simultaneously by a task tracker. mapreduce.map.memory.mb 1024 The amount of memory to request from the scheduler for each map task. mapreduce.map.cpu.vcores 1 The number of virtual cores to request from the scheduler for each map task. mapreduce.reduce.memory.mb 1024 The amount of memory to request from the scheduler for each reduce task. mapreduce.reduce.cpu.vcores 1 The number of virtual cores to request from the scheduler for each reduce task. mapreduce.jobtracker.retiredjobs.cache.size 1000 The number of retired job status to keep in the cache. mapreduce.tasktracker.outofband.heartbeat false Expert: Set this to true to let the tasktracker send an out-of-band heartbeat on task-completion for better latency. mapreduce.jobtracker.jobhistory.lru.cache.size 5 The number of job history files loaded in memory. The jobs are loaded when they are first accessed. The cache is cleared based on LRU. mapreduce.jobtracker.instrumentation org.apache.hadoop.mapred.JobTrackerMetricsInst Expert: The instrumentation class to associate with each JobTracker. mapred.child.java.opts -Xmx200m Java opts for the task processes. The following symbol, if present, will be interpolated: @taskid@ is replaced by current TaskID. Any other occurrences of '@' will go unchanged. For example, to enable verbose gc logging to a file named for the taskid in /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of: -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc Usage of -Djava.library.path can cause programs to no longer function if hadoop native libraries are used. These values should instead be set as part of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and mapreduce.reduce.env config settings. mapred.child.env User added environment variables for the task processes. Example : 1) A=foo This will set the env variable A to foo 2) B=$B:c This is inherit nodemanager's B env variable on Unix. 3) B=%B%;c This is inherit nodemanager's B env variable on Windows. mapreduce.admin.user.env Expert: Additional execution environment entries for map and reduce task processes. This is not an additive property. You must preserve the original value if you want your map and reduce tasks to have access to native libraries (compression, etc). When this value is empty, the command to set execution envrionment will be OS dependent: For linux, use LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native. 
For windows, use PATH = %PATH%;%HADOOP_COMMON_HOME%\\bin. mapreduce.task.tmp.dir ./tmp To set the value of tmp directory for map and reduce tasks. If the value is an absolute path, it is directly assigned. Otherwise, it is prepended with task's working directory. The java tasks are executed with option -Djava.io.tmpdir='the absolute path of the tmp dir'. Pipes and streaming are set with environment variable, TMPDIR='the absolute path of the tmp dir' mapreduce.map.log.level INFO The logging level for the map task. The allowed levels are: OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL. mapreduce.reduce.log.level INFO The logging level for the reduce task. The allowed levels are: OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL. mapreduce.map.cpu.vcores 1 The number of virtual cores required for each map task. mapreduce.reduce.cpu.vcores 1 The number of virtual cores required for each reduce task. mapreduce.reduce.merge.inmem.threshold 1000 The threshold, in terms of the number of files for the in-memory merge process. When we accumulate threshold number of files we initiate the in-memory merge and spill to disk. A value of 0 or less than 0 indicates we want to DON'T have any threshold and instead depend only on the ramfs's memory consumption to trigger the merge. mapreduce.reduce.shuffle.merge.percent 0.66 The usage threshold at which an in-memory merge will be initiated, expressed as a percentage of the total memory allocated to storing in-memory map outputs, as defined by mapreduce.reduce.shuffle.input.buffer.percent. mapreduce.reduce.shuffle.input.buffer.percent 0.70 The percentage of memory to be allocated from the maximum heap size to storing map outputs during the shuffle. mapreduce.reduce.input.buffer.percent 0.0 The percentage of memory- relative to the maximum heap size- to retain map outputs during the reduce. When the shuffle is concluded, any remaining map outputs in memory must consume less than this threshold before the reduce can begin. mapreduce.reduce.shuffle.memory.limit.percent 0.25 Expert: Maximum percentage of the in-memory limit that a single shuffle can consume mapreduce.shuffle.ssl.enabled false Whether to use SSL for for the Shuffle HTTP endpoints. mapreduce.shuffle.ssl.file.buffer.size 65536 Buffer size for reading spills from file when using SSL. mapreduce.shuffle.max.connections 0 Max allowed connections for the shuffle. Set to 0 (zero) to indicate no limit on the number of connections. mapreduce.shuffle.max.threads 0 Max allowed threads for serving shuffle connections. Set to zero to indicate the default of 2 times the number of available processors (as reported by Runtime.availableProcessors()). Netty is used to serve requests, so a thread is not needed for each connection. mapreduce.shuffle.transferTo.allowed This option can enable/disable using nio transferTo method in the shuffle phase. NIO transferTo does not perform well on windows in the shuffle phase. Thus, with this configuration property it is possible to disable it, in which case custom transfer method will be used. Recommended value is false when running Hadoop on Windows. For Linux, it is recommended to set it to true. If nothing is set then the default value is false for Windows, and true for Linux. mapreduce.shuffle.transfer.buffer.size 131072 This property is used only if mapreduce.shuffle.transferTo.allowed is set to false. In that case, this property defines the size of the buffer used in the buffer copy code for the shuffle phase. 
The size of this buffer determines the size of the IO requests. mapreduce.reduce.markreset.buffer.percent 0.0 The percentage of memory -relative to the maximum heap size- to be used for caching values when using the mark-reset functionality. mapreduce.map.speculative true If true, then multiple instances of some map tasks may be executed in parallel. mapreduce.reduce.speculative true If true, then multiple instances of some reduce tasks may be executed in parallel. mapreduce.job.speculative.speculativecap 0.1 The max percent (0-1) of running tasks that can be speculatively re-executed at any time. mapreduce.job.map.output.collector.class org.apache.hadoop.mapred.MapTask$MapOutputBuffer The MapOutputCollector implementation(s) to use. This may be a comma-separated list of class names, in which case the map task will try to initialize each of the collectors in turn. The first to successfully initialize will be used. mapreduce.job.speculative.slowtaskthreshold 1.0 The number of standard deviations by which a task's ave progress-rates must be lower than the average of all running tasks' for the task to be considered too slow. mapreduce.job.speculative.slownodethreshold 1.0 The number of standard deviations by which a Task Tracker's ave map and reduce progress-rates (finishTime-dispatchTime) must be lower than the average of all successful map/reduce task's for the TT to be considered too slow to give a speculative task to. mapreduce.job.jvm.numtasks 1 How many tasks to run per jvm. If set to -1, there is no limit. mapreduce.job.ubertask.enable false Whether to enable the small-jobs "ubertask" optimization, which runs "sufficiently small" jobs sequentially within a single JVM. "Small" is defined by the following maxmaps, maxreduces, and maxbytes settings. Note that configurations for application masters also affect the "Small" definition - yarn.app.mapreduce.am.resource.mb must be larger than both mapreduce.map.memory.mb and mapreduce.reduce.memory.mb, and yarn.app.mapreduce.am.resource.cpu-vcores must be larger than both mapreduce.map.cpu.vcores and mapreduce.reduce.cpu.vcores to enable ubertask. Users may override this value. mapreduce.job.ubertask.maxmaps 9 Threshold for number of maps, beyond which job is considered too big for the ubertasking optimization. Users may override this value, but only downward. mapreduce.job.ubertask.maxreduces 1 Threshold for number of reduces, beyond which job is considered too big for the ubertasking optimization. CURRENTLY THE CODE CANNOT SUPPORT MORE THAN ONE REDUCE and will ignore larger values. (Zero is a valid max, however.) Users may override this value, but only downward. mapreduce.job.ubertask.maxbytes Threshold for number of input bytes, beyond which job is considered too big for the ubertasking optimization. If no value is specified, dfs.block.size is used as a default. Be sure to specify a default value in mapred-site.xml if the underlying filesystem is not HDFS. Users may override this value, but only downward. mapreduce.job.emit-timeline-data false Specifies if the Application Master should emit timeline data to the timeline server. Individual jobs can override this value. mapreduce.input.fileinputformat.split.minsize 0 The minimum size chunk that map input should be split into. Note that some file formats may have minimum split sizes that take priority over this setting. mapreduce.input.fileinputformat.list-status.num-threads 1 The number of threads to use to list and fetch block locations for the specified input paths. 
Note: multiple threads should not be used if a custom non thread-safe path filter is used. mapreduce.jobtracker.maxtasks.perjob -1 The maximum number of tasks for a single job. A value of -1 indicates that there is no maximum. mapreduce.input.lineinputformat.linespermap 1 When using NLineInputFormat, the number of lines of input data to include in each split. mapreduce.client.submit.file.replication 10 The replication level for submitted job files. This should be around the square root of the number of nodes. mapreduce.tasktracker.dns.interface default The name of the Network Interface from which a task tracker should report its IP address. mapreduce.tasktracker.dns.nameserver default The host name or IP address of the name server (DNS) which a TaskTracker should use to determine the host name used by the JobTracker for communication and display purposes. mapreduce.tasktracker.http.threads 40 The number of worker threads that for the http server. This is used for map output fetching mapreduce.tasktracker.http.address 0.0.0.0:50060 The task tracker http server address and port. If the port is 0 then the server will start on a free port. mapreduce.task.files.preserve.failedtasks false Should the files for failed tasks be kept. This should only be used on jobs that are failing, because the storage is never reclaimed. It also prevents the map outputs from being erased from the reduce directory as they are consumed. mapreduce.output.fileoutputformat.compress false Should the job outputs be compressed? mapreduce.output.fileoutputformat.compress.type RECORD If the job outputs are to compressed as SequenceFiles, how should they be compressed? Should be one of NONE, RECORD or BLOCK. mapreduce.output.fileoutputformat.compress.codec org.apache.hadoop.io.compress.DefaultCodec If the job outputs are compressed, how should they be compressed? mapreduce.map.output.compress false Should the outputs of the maps be compressed before being sent across the network. Uses SequenceFile compression. mapreduce.map.output.compress.codec org.apache.hadoop.io.compress.DefaultCodec If the map outputs are compressed, how should they be compressed? map.sort.class org.apache.hadoop.util.QuickSort The default sort class for sorting keys. mapreduce.task.userlog.limit.kb 0 The maximum size of user-logs of each task in KB. 0 disables the cap. yarn.app.mapreduce.am.container.log.limit.kb 0 The maximum size of the MRAppMaster attempt container logs in KB. 0 disables the cap. yarn.app.mapreduce.task.container.log.backups 0 Number of backup files for task logs when using ContainerRollingLogAppender (CRLA). See org.apache.log4j.RollingFileAppender.maxBackupIndex. By default, ContainerLogAppender (CLA) is used, and container logs are not rolled. CRLA is enabled for tasks when both mapreduce.task.userlog.limit.kb and yarn.app.mapreduce.task.container.log.backups are greater than zero. yarn.app.mapreduce.am.container.log.backups 0 Number of backup files for the ApplicationMaster logs when using ContainerRollingLogAppender (CRLA). See org.apache.log4j.RollingFileAppender.maxBackupIndex. By default, ContainerLogAppender (CLA) is used, and container logs are not rolled. CRLA is enabled for the ApplicationMaster when both mapreduce.task.userlog.limit.kb and yarn.app.mapreduce.am.container.log.backups are greater than zero. mapreduce.job.userlog.retain.hours 24 The maximum time, in hours, for which the user-logs are to be retained after the job completion. 
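The guidance above that mapreduce.client.submit.file.replication should be roughly the square root of the number of nodes translates into a one-line calculation. The helper below is a sketch for sizing the value, not part of Hadoop or Sahara:

import math

def suggested_submit_file_replication(num_nodes):
    # "around the square root of the number of nodes", never below 1
    return max(1, round(math.sqrt(num_nodes)))

for nodes in (4, 25, 400):
    print(nodes, suggested_submit_file_replication(nodes))  # 2, 5, 20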
mapreduce.jobtracker.hosts.filename Names a file that contains the list of nodes that may connect to the jobtracker. If the value is empty, all hosts are permitted. mapreduce.jobtracker.hosts.exclude.filename Names a file that contains the list of hosts that should be excluded by the jobtracker. If the value is empty, no hosts are excluded. mapreduce.jobtracker.heartbeats.in.second 100 Expert: Approximate number of heart-beats that could arrive at JobTracker in a second. Assuming each RPC can be processed in 10msec, the default value is made 100 RPCs in a second. mapreduce.jobtracker.tasktracker.maxblacklists 4 The number of blacklists for a taskTracker by various jobs after which the task tracker could be blacklisted across all jobs. The tracker will be given a tasks later (after a day). The tracker will become a healthy tracker after a restart. mapreduce.job.maxtaskfailures.per.tracker 3 The number of task-failures on a tasktracker of a given job after which new tasks of that job aren't assigned to it. It MUST be less than mapreduce.map.maxattempts and mapreduce.reduce.maxattempts otherwise the failed task will never be tried on a different node. mapreduce.client.output.filter FAILED The filter for controlling the output of the task's userlogs sent to the console of the JobClient. The permissible options are: NONE, KILLED, FAILED, SUCCEEDED and ALL. mapreduce.client.completion.pollinterval 5000 The interval (in milliseconds) between which the JobClient polls the JobTracker for updates about job status. You may want to set this to a lower value to make tests run faster on a single node system. Adjusting this value in production may lead to unwanted client-server traffic. mapreduce.client.progressmonitor.pollinterval 1000 The interval (in milliseconds) between which the JobClient reports status to the console and checks for job completion. You may want to set this to a lower value to make tests run faster on a single node system. Adjusting this value in production may lead to unwanted client-server traffic. mapreduce.jobtracker.persist.jobstatus.active true Indicates if persistency of job status information is active or not. mapreduce.jobtracker.persist.jobstatus.hours 1 The number of hours job status information is persisted in DFS. The job status information will be available after it drops of the memory queue and between jobtracker restarts. With a zero value the job status information is not persisted at all in DFS. mapreduce.jobtracker.persist.jobstatus.dir /jobtracker/jobsInfo The directory where the job status information is persisted in a file system to be available after it drops of the memory queue and between jobtracker restarts. mapreduce.task.profile false To set whether the system should collect profiler information for some of the tasks in this job? The information is stored in the user log directory. The value is "true" if task profiling is enabled. mapreduce.task.profile.maps 0-2 To set the ranges of map tasks to profile. mapreduce.task.profile has to be set to true for the value to be accounted. mapreduce.task.profile.reduces 0-2 To set the ranges of reduce tasks to profile. mapreduce.task.profile has to be set to true for the value to be accounted. mapreduce.task.profile.params -agentlib:hprof=cpu=samples,heap=sites,force=n,thread=y,verbose=n,file=%s JVM profiler parameters used to profile map and reduce task attempts. This string may contain a single format specifier %s that will be replaced by the path to profile.out in the task attempt log directory. 
To specify different profiling options for map tasks and reduce tasks, more specific parameters mapreduce.task.profile.map.params and mapreduce.task.profile.reduce.params should be used. mapreduce.task.profile.map.params ${mapreduce.task.profile.params} Map-task-specific JVM profiler parameters. See mapreduce.task.profile.params mapreduce.task.profile.reduce.params ${mapreduce.task.profile.params} Reduce-task-specific JVM profiler parameters. See mapreduce.task.profile.params mapreduce.task.skip.start.attempts 2 The number of Task attempts AFTER which skip mode will be kicked off. When skip mode is kicked off, the tasks reports the range of records which it will process next, to the TaskTracker. So that on failures, TT knows which ones are possibly the bad records. On further executions, those are skipped. mapreduce.map.skip.proc.count.autoincr true The flag which if set to true, SkipBadRecords.COUNTER_MAP_PROCESSED_RECORDS is incremented by MapRunner after invoking the map function. This value must be set to false for applications which process the records asynchronously or buffer the input records. For example streaming. In such cases applications should increment this counter on their own. mapreduce.reduce.skip.proc.count.autoincr true The flag which if set to true, SkipBadRecords.COUNTER_REDUCE_PROCESSED_GROUPS is incremented by framework after invoking the reduce function. This value must be set to false for applications which process the records asynchronously or buffer the input records. For example streaming. In such cases applications should increment this counter on their own. mapreduce.job.skip.outdir If no value is specified here, the skipped records are written to the output directory at _logs/skip. User can stop writing skipped records by giving the value "none". mapreduce.map.skip.maxrecords 0 The number of acceptable skip records surrounding the bad record PER bad record in mapper. The number includes the bad record as well. To turn the feature of detection/skipping of bad records off, set the value to 0. The framework tries to narrow down the skipped range by retrying until this threshold is met OR all attempts get exhausted for this task. Set the value to Long.MAX_VALUE to indicate that framework need not try to narrow down. Whatever records(depends on application) get skipped are acceptable. mapreduce.reduce.skip.maxgroups 0 The number of acceptable skip groups surrounding the bad group PER bad group in reducer. The number includes the bad group as well. To turn the feature of detection/skipping of bad groups off, set the value to 0. The framework tries to narrow down the skipped range by retrying until this threshold is met OR all attempts get exhausted for this task. Set the value to Long.MAX_VALUE to indicate that framework need not try to narrow down. Whatever groups(depends on application) get skipped are acceptable. mapreduce.ifile.readahead true Configuration key to enable/disable IFile readahead. mapreduce.ifile.readahead.bytes 4194304 Configuration key to set the IFile readahead length in bytes. mapreduce.jobtracker.taskcache.levels 2 This is the max level of the task cache. For example, if the level is 2, the tasks cached are at the host level and at the rack level. mapreduce.job.queuename default Queue to which a job is submitted. This must match one of the queues defined in mapred-queues.xml for the system. Also, the ACL setup for the queue must allow the current user to submit a job to the queue. 
Before specifying a queue, ensure that the system is configured with the queue, and access is allowed for submitting jobs to the queue. mapreduce.job.tags Tags for the job that will be passed to YARN at submission time. Queries to YARN for applications can filter on these tags. mapreduce.cluster.acls.enabled false Specifies whether ACLs should be checked for authorization of users for doing various queue and job level operations. ACLs are disabled by default. If enabled, access control checks are made by JobTracker and TaskTracker when requests are made by users for queue operations like submit job to a queue and kill a job in the queue and job operations like viewing the job-details (See mapreduce.job.acl-view-job) or for modifying the job (See mapreduce.job.acl-modify-job) using Map/Reduce APIs, RPCs or via the console and web user interfaces. For enabling this flag(mapreduce.cluster.acls.enabled), this is to be set to true in mapred-site.xml on JobTracker node and on all TaskTracker nodes. mapreduce.job.acl-modify-job Job specific access-control list for 'modifying' the job. It is only used if authorization is enabled in Map/Reduce by setting the configuration property mapreduce.cluster.acls.enabled to true. This specifies the list of users and/or groups who can do modification operations on the job. For specifying a list of users and groups the format to use is "user1,user2 group1,group". If set to '*', it allows all users/groups to modify this job. If set to ' '(i.e. space), it allows none. This configuration is used to guard all the modifications with respect to this job and takes care of all the following operations: o killing this job o killing a task of this job, failing a task of this job o setting the priority of this job Each of these operations are also protected by the per-queue level ACL "acl-administer-jobs" configured via mapred-queues.xml. So a caller should have the authorization to satisfy either the queue-level ACL or the job-level ACL. Irrespective of this ACL configuration, (a) job-owner, (b) the user who started the cluster, (c) members of an admin configured supergroup configured via mapreduce.cluster.permissions.supergroup and (d) queue administrators of the queue to which this job was submitted to configured via acl-administer-jobs for the specific queue in mapred-queues.xml can do all the modification operations on a job. By default, nobody else besides job-owner, the user who started the cluster, members of supergroup and queue administrators can perform modification operations on a job. mapreduce.job.acl-view-job Job specific access-control list for 'viewing' the job. It is only used if authorization is enabled in Map/Reduce by setting the configuration property mapreduce.cluster.acls.enabled to true. This specifies the list of users and/or groups who can view private details about the job. For specifying a list of users and groups the format to use is "user1,user2 group1,group". If set to '*', it allows all users/groups to modify this job. If set to ' '(i.e. space), it allows none. This configuration is used to guard some of the job-views and at present only protects APIs that can return possibly sensitive information of the job-owner like o job-level counters o task-level counters o tasks' diagnostic information o task-logs displayed on the TaskTracker web-UI and o job.xml showed by the JobTracker's web-UI Every other piece of information of jobs is still accessible by any other user, for e.g., JobStatus, JobProfile, list of jobs in the queue, etc. 
Irrespective of this ACL configuration, (a) job-owner, (b) the user who started the cluster, (c) members of an admin configured supergroup configured via mapreduce.cluster.permissions.supergroup and (d) queue administrators of the queue to which this job was submitted to configured via acl-administer-jobs for the specific queue in mapred-queues.xml can do all the view operations on a job. By default, nobody else besides job-owner, the user who started the cluster, memebers of supergroup and queue administrators can perform view operations on a job. mapreduce.tasktracker.indexcache.mb 10 The maximum memory that a task tracker allows for the index cache that is used when serving map outputs to reducers. mapreduce.job.token.tracking.ids.enabled false Whether to write tracking ids of tokens to job-conf. When true, the configuration property "mapreduce.job.token.tracking.ids" is set to the token-tracking-ids of the job mapreduce.job.token.tracking.ids When mapreduce.job.token.tracking.ids.enabled is set to true, this is set by the framework to the token-tracking-ids used by the job. mapreduce.task.merge.progress.records 10000 The number of records to process during merge before sending a progress notification to the TaskTracker. mapreduce.task.combine.progress.records 10000 The number of records to process during combine output collection before sending a progress notification. mapreduce.job.reduce.slowstart.completedmaps 0.05 Fraction of the number of maps in the job which should be complete before reduces are scheduled for the job. mapreduce.job.complete.cancel.delegation.tokens true if false - do not unregister/cancel delegation tokens from renewal, because same tokens may be used by spawned jobs mapreduce.tasktracker.taskcontroller org.apache.hadoop.mapred.DefaultTaskController TaskController which is used to launch and manage task execution mapreduce.tasktracker.group Expert: Group to which TaskTracker belongs. If LinuxTaskController is configured via mapreduce.tasktracker.taskcontroller, the group owner of the task-controller binary should be same as this group. mapreduce.shuffle.port 13562 Default port that the ShuffleHandler will run on. ShuffleHandler is a service run at the NodeManager to facilitate transfers of intermediate Map outputs to requesting Reducers. mapreduce.job.reduce.shuffle.consumer.plugin.class org.apache.hadoop.mapreduce.task.reduce.Shuffle Name of the class whose instance will be used to send shuffle requests by reducetasks of this job. The class must be an instance of org.apache.hadoop.mapred.ShuffleConsumerPlugin. mapreduce.tasktracker.healthchecker.script.path Absolute path to the script which is periodicallyrun by the node health monitoring service to determine if the node is healthy or not. If the value of this key is empty or the file does not exist in the location configured here, the node health monitoring service is not started. mapreduce.tasktracker.healthchecker.interval 60000 Frequency of the node health script to be run, in milliseconds mapreduce.tasktracker.healthchecker.script.timeout 600000 Time after node health script should be killed if unresponsive and considered that the script has failed. mapreduce.tasktracker.healthchecker.script.args List of arguments which are to be passed to node health script when it is being launched comma seperated. mapreduce.job.counters.limit 120 Limit on the number of user counters allowed per job. mapreduce.framework.name local The runtime framework for executing MapReduce jobs. Can be one of local, classic or yarn. 
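The job ACL format described above ("user1,user2 group1,group2", where '*' allows everyone and a single space allows nobody) can be evaluated with a small helper. This sketch only models the list itself; it deliberately ignores the always-allowed principals such as the job owner and cluster administrators that the description also covers:

def acl_allows(acl, user, user_groups):
    # '*' allows everyone; a blank/space value allows nobody; otherwise the
    # value is "users groups" with comma-separated entries on each side.
    if acl == "*":
        return True
    if not acl.strip():
        return False
    users_part, _, groups_part = acl.partition(" ")
    users = {u for u in users_part.split(",") if u}
    groups = {g for g in groups_part.split(",") if g}
    return user in users or bool(groups & set(user_groups))

print(acl_allows("alice,bob admins", "carol", ["admins"]))  # True, via group membership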
yarn.app.mapreduce.am.staging-dir /tmp/hadoop-yarn/staging The staging dir used while submitting jobs. mapreduce.am.max-attempts 2 The maximum number of application attempts. It is a application-specific setting. It should not be larger than the global number set by resourcemanager. Otherwise, it will be override. The default number is set to 2, to allow at least one retry for AM. mapreduce.job.end-notification.url Indicates url which will be called on completion of job to inform end status of job. User can give at most 2 variables with URI : $jobId and $jobStatus. If they are present in URI, then they will be replaced by their respective values. mapreduce.job.end-notification.retry.attempts 0 The number of times the submitter of the job wants to retry job end notification if it fails. This is capped by mapreduce.job.end-notification.max.attempts mapreduce.job.end-notification.retry.interval 1000 The number of milliseconds the submitter of the job wants to wait before job end notification is retried if it fails. This is capped by mapreduce.job.end-notification.max.retry.interval mapreduce.job.end-notification.max.attempts 5 true The maximum number of times a URL will be read for providing job end notification. Cluster administrators can set this to limit how long after end of a job, the Application Master waits before exiting. Must be marked as final to prevent users from overriding this. mapreduce.job.end-notification.max.retry.interval 5000 true The maximum amount of time (in milliseconds) to wait before retrying job end notification. Cluster administrators can set this to limit how long the Application Master waits before exiting. Must be marked as final to prevent users from overriding this. yarn.app.mapreduce.am.env User added environment variables for the MR App Master processes. Example : 1) A=foo This will set the env variable A to foo 2) B=$B:c This is inherit tasktracker's B env variable. yarn.app.mapreduce.am.admin.user.env Environment variables for the MR App Master processes for admin purposes. These values are set first and can be overridden by the user env (yarn.app.mapreduce.am.env) Example : 1) A=foo This will set the env variable A to foo 2) B=$B:c This is inherit app master's B env variable. yarn.app.mapreduce.am.command-opts -Xmx1024m Java opts for the MR App Master processes. The following symbol, if present, will be interpolated: @taskid@ is replaced by current TaskID. Any other occurrences of '@' will go unchanged. For example, to enable verbose gc logging to a file named for the taskid in /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of: -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc Usage of -Djava.library.path can cause programs to no longer function if hadoop native libraries are used. These values should instead be set as part of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and mapreduce.reduce.env config settings. yarn.app.mapreduce.am.admin-command-opts Java opts for the MR App Master processes for admin purposes. It will appears before the opts set by yarn.app.mapreduce.am.command-opts and thus its options can be overridden user. Usage of -Djava.library.path can cause programs to no longer function if hadoop native libraries are used. These values should instead be set as part of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and mapreduce.reduce.env config settings. 
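The end-notification properties above amount to a placeholder substitution plus two administrator-enforced caps. The snippet below is only a paraphrase of that description for clarity, not Hadoop's actual implementation; the URL and job id are invented.

def build_notification_url(template, job_id, job_status):
    # $jobId and $jobStatus, if present in the URL, are replaced by their values
    return template.replace("$jobId", job_id).replace("$jobStatus", job_status)

def effective_attempts(requested, cluster_max=5):
    # mapreduce.job.end-notification.retry.attempts is capped by
    # mapreduce.job.end-notification.max.attempts (marked final by the admin)
    return min(requested, cluster_max)

print(build_notification_url(
    "http://notify.example.com/cb?job=$jobId&status=$jobStatus",
    "job_201403310001_0001", "SUCCEEDED"))
print(effective_attempts(10))  # capped to 5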
yarn.app.mapreduce.am.job.task.listener.thread-count 30 The number of threads used to handle RPC calls in the MR AppMaster from remote tasks yarn.app.mapreduce.am.job.client.port-range Range of ports that the MapReduce AM can use when binding. Leave blank if you want all possible ports. For example 50000-50050,50100-50200 yarn.app.mapreduce.am.job.committer.cancel-timeout 60000 The amount of time in milliseconds to wait for the output committer to cancel an operation if the job is killed yarn.app.mapreduce.am.job.committer.commit-window 10000 Defines a time window in milliseconds for output commit operations. If contact with the RM has occurred within this window then commits are allowed, otherwise the AM will not allow output commits until contact with the RM has been re-established. yarn.app.mapreduce.am.scheduler.heartbeat.interval-ms 1000 The interval in ms at which the MR AppMaster should send heartbeats to the ResourceManager yarn.app.mapreduce.client-am.ipc.max-retries 3 The number of client retries to the AM - before reconnecting to the RM to fetch Application Status. yarn.app.mapreduce.client-am.ipc.max-retries-on-timeouts 3 The number of client retries on socket timeouts to the AM - before reconnecting to the RM to fetch Application Status. yarn.app.mapreduce.client.max-retries 3 The number of client retries to the RM/HS before throwing exception. This is a layer above the ipc. yarn.app.mapreduce.am.resource.mb 1536 The amount of memory the MR AppMaster needs. yarn.app.mapreduce.am.resource.cpu-vcores 1 The number of virtual CPU cores the MR AppMaster needs. CLASSPATH for MR applications. A comma-separated list of CLASSPATH entries. If mapreduce.application.framework is set then this must specify the appropriate classpath for that archive, and the name of the archive must be present in the classpath. If mapreduce.app-submission.cross-platform is false, platform-specific environment vairable expansion syntax would be used to construct the default CLASSPATH entries. For Linux: $HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*, $HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*. For Windows: %HADOOP_MAPRED_HOME%/share/hadoop/mapreduce/*, %HADOOP_MAPRED_HOME%/share/hadoop/mapreduce/lib/*. If mapreduce.app-submission.cross-platform is true, platform-agnostic default CLASSPATH for MR applications would be used: {{HADOOP_MAPRED_HOME}}/share/hadoop/mapreduce/*, {{HADOOP_MAPRED_HOME}}/share/hadoop/mapreduce/lib/* Parameter expansion marker will be replaced by NodeManager on container launch based on the underlying OS accordingly. mapreduce.application.classpath If enabled, user can submit an application cross-platform i.e. submit an application from a Windows client to a Linux/Unix server or vice versa. mapreduce.app-submission.cross-platform false Path to the MapReduce framework archive. If set, the framework archive will automatically be distributed along with the job, and this path would normally reside in a public location in an HDFS filesystem. As with distributed cache files, this can be a URL with a fragment specifying the alias to use for the archive name. For example, hdfs:/mapred/framework/hadoop-mapreduce-2.1.1.tar.gz#mrframework would alias the localized archive as "mrframework". Note that mapreduce.application.classpath must include the appropriate classpath for the specified framework. The base name of the archive, or alias of the archive if an alias is used, must appear in the specified classpath. 
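The coupling between the framework archive and the classpath is easiest to see with a paired example. The values below are illustrative only, reusing the hdfs:/.../#mrframework alias from the description above; the exact entries under the alias depend on the layout of the archive being distributed.

framework_configs = {
    "mapreduce.application.framework.path":
        "hdfs:/mapred/framework/hadoop-mapreduce-2.1.1.tar.gz#mrframework",
    # The alias ("mrframework") must also appear in the classpath entries.
    "mapreduce.application.classpath":
        "mrframework/share/hadoop/mapreduce/*,"
        "mrframework/share/hadoop/mapreduce/lib/*",
}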
mapreduce.application.framework.path mapreduce.job.classloader false Whether to use a separate (isolated) classloader for user classes in the task JVM. mapreduce.job.classloader.system.classes Used to override the default definition of the system classes for the job classloader. The system classes are a comma-separated list of classes that should be loaded from the system classpath, not the user-supplied JARs, when mapreduce.job.classloader is enabled. Names ending in '.' (period) are treated as package names, and names starting with a '-' are treated as negative matches. mapreduce.jobhistory.address 0.0.0.0:10020 MapReduce JobHistory Server IPC host:port mapreduce.jobhistory.webapp.address 0.0.0.0:19888 MapReduce JobHistory Server Web UI host:port mapreduce.jobhistory.keytab Location of the kerberos keytab file for the MapReduce JobHistory Server. /etc/security/keytab/jhs.service.keytab mapreduce.jobhistory.principal Kerberos principal name for the MapReduce JobHistory Server. jhs/_HOST@REALM.TLD mapreduce.jobhistory.intermediate-done-dir ${yarn.app.mapreduce.am.staging-dir}/history/done_intermediate mapreduce.jobhistory.done-dir ${yarn.app.mapreduce.am.staging-dir}/history/done mapreduce.jobhistory.cleaner.enable true mapreduce.jobhistory.cleaner.interval-ms 86400000 How often the job history cleaner checks for files to delete, in milliseconds. Defaults to 86400000 (one day). Files are only deleted if they are older than mapreduce.jobhistory.max-age-ms. mapreduce.jobhistory.max-age-ms 604800000 Job history files older than this many milliseconds will be deleted when the history cleaner runs. Defaults to 604800000 (1 week). mapreduce.jobhistory.client.thread-count 10 The number of threads to handle client API requests mapreduce.jobhistory.datestring.cache.size 200000 Size of the date string cache. Effects the number of directories which will be scanned to find a job. mapreduce.jobhistory.joblist.cache.size 20000 Size of the job list cache mapreduce.jobhistory.loadedjobs.cache.size 5 Size of the loaded job cache mapreduce.jobhistory.move.interval-ms 180000 Scan for history files to more from intermediate done dir to done dir at this frequency. mapreduce.jobhistory.move.thread-count 3 The number of threads used to move files. mapreduce.jobhistory.store.class The HistoryStorage class to use to cache history data. mapreduce.jobhistory.minicluster.fixed.ports false Whether to use fixed ports with the minicluster mapreduce.jobhistory.admin.address 0.0.0.0:10033 The address of the History server admin interface. mapreduce.jobhistory.admin.acl * ACL of who can be admin of the History server. mapreduce.jobhistory.recovery.enable false Enable the history server to store server state and recover server state upon startup. If enabled then mapreduce.jobhistory.recovery.store.class must be specified. mapreduce.jobhistory.recovery.store.class org.apache.hadoop.mapreduce.v2.hs.HistoryServerFileSystemStateStoreService The HistoryServerStateStoreService class to store history server state for recovery. mapreduce.jobhistory.recovery.store.fs.uri ${hadoop.tmp.dir}/mapred/history/recoverystore The URI where history server state will be stored if HistoryServerFileSystemStateStoreService is configured as the recovery storage class. mapreduce.jobhistory.http.policy HTTP_ONLY This configures the HTTP endpoint for JobHistoryServer web UI. 
The following values are supported: - HTTP_ONLY : Service is provided only on http - HTTPS_ONLY : Service is provided only on https ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/resources/mapred-job-config.xml0000664000175000017500000000311600000000000024547 0ustar00zuulzuul00000000000000 mapred.reducer.new-api true mapred.mapper.new-api true mapred.input.dir mapred.output.dir mapred.mapoutput.key.class mapred.mapoutput.value.class mapred.output.key.class mapred.output.value.class mapreduce.map.class mapreduce.reduce.class mapred.mapper.class mapred.reducer.class mapred.jar mapred.job.name ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/resources/workflow.xml0000664000175000017500000000045400000000000023140 0ustar00zuulzuul00000000000000 Workflow failed, error message[${wf:errorMessage(wf:lastErrorNode())}] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/s3_common.py0000664000175000017500000000656300000000000021010 0ustar00zuulzuul00000000000000# Copyright 2017 Massachusetts Open Cloud # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
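# Added note (not part of the original module): the helpers below fetch EDP job
# binaries from S3-compatible object storage. A binary URL must look like
# s3://<bucket>/<object>; a bare bucket is rejected by _validate_job_binary_url.
# Credentials and the endpoint come from job_binary.extra, where 'secretkey' is
# a castellan key reference resolved through key_manager.get_secret. A rough,
# hypothetical usage sketch (names and values invented):
#
#     binary.url = "s3://edp-binaries/wordcount.jar"
#     binary.extra = {"accesskey": "AKIA...",
#                     "secretkey": "<castellan-key-id>",
#                     "endpoint": "https://object-store.example.com"}
#     data = get_raw_job_binary_data(binary)  # raw bytes, size-checked
#                                             # against CONF.job_binary_max_KB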
import botocore.exceptions import botocore.session from oslo_config import cfg import six import sahara.exceptions as ex from sahara.i18n import _ from sahara.service.castellan import utils as key_manager S3_JB_PREFIX = "s3://" S3_ACCESS_KEY_CONFIG = "fs.s3a.access.key" S3_SECRET_KEY_CONFIG = "fs.s3a.secret.key" S3_ENDPOINT_CONFIG = "fs.s3a.endpoint" S3_BUCKET_IN_PATH_CONFIG = "fs.s3a.path.style.access" S3_SSL_CONFIG = "fs.s3a.connection.ssl.enabled" S3_DS_CONFIGS = [S3_ACCESS_KEY_CONFIG, S3_SECRET_KEY_CONFIG, S3_ENDPOINT_CONFIG, S3_BUCKET_IN_PATH_CONFIG, S3_SSL_CONFIG] CONF = cfg.CONF def _get_s3_client(extra): sess = botocore.session.get_session() secretkey = key_manager.get_secret(extra['secretkey']) return sess.create_client( 's3', # TODO(jfreud): investigate region name support region_name=None, # TODO(jfreud): investigate configurable verify verify=False, endpoint_url=extra['endpoint'], aws_access_key_id=extra['accesskey'], aws_secret_access_key=secretkey ) def _get_names_from_job_binary_url(url): parse = six.moves.urllib.parse.urlparse(url) return (parse.netloc + parse.path).split('/', 1) def _get_raw_job_binary_data(job_binary, conn): names = _get_names_from_job_binary_url(job_binary.url) bucket, obj = names try: size = conn.head_object(Bucket=bucket, Key=obj)['ContentLength'] # We have bytes, but want kibibytes: total_KB = size / 1024.0 if total_KB > CONF.job_binary_max_KB: raise ex.DataTooBigException( round(total_KB, 1), CONF.job_binary_max_KB, _("Size of S3 object (%(size)sKB) is greater " "than maximum (%(maximum)sKB)")) body = conn.get_object(Bucket=bucket, Key=obj)['Body'].read() except ex.DataTooBigException: raise except Exception: raise ex.S3ClientException("Couldn't get object from s3") return body def _validate_job_binary_url(job_binary_url): if not job_binary_url.startswith(S3_JB_PREFIX): # Sanity check raise ex.BadJobBinaryException( _("URL for binary in S3 must start with %s") % S3_JB_PREFIX) names = _get_names_from_job_binary_url(job_binary_url) if len(names) == 1: # we have a bucket instead of an individual object raise ex.BadJobBinaryException( _("URL for binary in S3 must specify an object not a bucket")) def get_raw_job_binary_data(job_binary): _validate_job_binary_url(job_binary.url) try: conn = _get_s3_client(job_binary.extra) except Exception: raise ex.S3ClientException("Couldn't create boto client") return _get_raw_job_binary_data(job_binary, conn) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/shares.py0000664000175000017500000002645200000000000020377 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import collections import itertools from oslo_log import log import six from sahara import context from sahara.utils.openstack import manila LOG = log.getLogger(__name__) def mount_shares(cluster): """Mounts all shares specified for the cluster and any of its node groups. 
- In the event that a specific share is configured for both the cluster and a specific node group, configuration at the node group level will be ignored. - In the event that utilities required to mount the share are not already installed on the node, this method may fail if the node cannot access the internet. - This method will not remove already-mounted shares. - This method will not remove or remount (or currently, reconfigure) shares already mounted to the desired local mount point. :param cluster: The cluster model. """ node_groups = (ng for ng in cluster.node_groups if ng.shares) ng_mounts = [_mount(ng, share_config) for ng in node_groups for share_config in ng.shares] c_mounts = [_mount(ng, share_config) for ng in cluster.node_groups for share_config in cluster.shares or []] if not (ng_mounts or c_mounts): return ng_mounts_by_share_id = _group_mounts_by_share_id(ng_mounts) c_mounts_by_share_id = _group_mounts_by_share_id(c_mounts) all_share_ids = (set(ng_mounts_by_share_id.keys()) | set(c_mounts_by_share_id.keys())) mounts_by_share_id = { share_id: c_mounts_by_share_id.get(share_id) or ng_mounts_by_share_id[share_id] for share_id in all_share_ids} all_mounts = itertools.chain(*mounts_by_share_id.values()) mounts_by_ng_id = _group_mounts_by_ng_id(all_mounts) client = manila.client() handlers_by_share_id = {id: _ShareHandler.create_from_id(id, client) for id in all_share_ids} for mounts in mounts_by_ng_id.values(): node_group_shares = _NodeGroupShares(mounts[0].node_group) for mount in mounts: share_id = mount.share_config['id'] node_group_shares.add_share(mount.share_config, handlers_by_share_id[share_id]) node_group_shares.mount_shares_to_node_group() def unmount_shares(cluster, unmount_share_list): """Unmounts all shares in unmount_share_list on the given cluster :param cluster: The cluster model. :param unmount_share_list: list of shares to unmount """ client = manila.client() unmount_share_ids = (set(s['id'] for s in unmount_share_list)) handlers_by_share_id = {id: _ShareHandler.create_from_id(id, client) for id in unmount_share_ids} for share in unmount_share_list: for ng in cluster.node_groups: for instance in ng.instances: handlers_by_share_id[share['id']].unmount_from_instance( instance.remote(), share) _mount = collections.namedtuple('Mount', ['node_group', 'share_config']) def _group_mounts(mounts, grouper): result = collections.defaultdict(list) for mount in mounts: result[grouper(mount)].append(mount) return result def _group_mounts_by_share_id(mounts): return _group_mounts(mounts, lambda mount: mount.share_config['id']) def _group_mounts_by_ng_id(mounts): return _group_mounts(mounts, lambda mount: mount.node_group['id']) class _NodeGroupShares(object): """Organizes share mounting for a single node group.""" _share = collections.namedtuple('Share', ['share_config', 'handler']) def __init__(self, node_group): self.node_group = node_group self.shares = [] def add_share(self, share_config, handler): """Adds a share to mount; add all shares before mounting.""" self.shares.append(self._share(share_config, handler)) def mount_shares_to_node_group(self): """Mounts all configured shares to the node group.""" for instance in self.node_group.instances: with context.set_current_instance_id(instance.instance_id): self._mount_shares_to_instance(instance) def _mount_shares_to_instance(self, instance): # Note: Additional iteration here is critical: based on # experimentation, failure to execute allow_access before spawning # the remote results in permission failure. 
for share in self.shares: share.handler.allow_access_to_instance(instance, share.share_config) with instance.remote() as remote: share_types = set(type(share.handler) for share in self.shares) for share_type in share_types: share_type.setup_instance(remote) for share in self.shares: share.handler.mount_to_instance(remote, share.share_config) @six.add_metaclass(abc.ABCMeta) class _ShareHandler(object): """Handles mounting of a single share to any number of instances.""" @classmethod def setup_instance(cls, remote): """Prepares an instance to mount this type of share.""" pass @classmethod def create_from_id(cls, share_id, client): """Factory method for creation from a share_id of unknown type.""" share = manila.get_share(client, share_id, raise_on_error=True) mounter_class = _share_types[share.share_proto] return mounter_class(share, client) def __init__(self, share, client): self.share = share self.client = client def allow_access_to_instance(self, instance, share_config): """Mounts a specific share to a specific instance.""" access_level = self._get_access_level(share_config) accesses = list(filter(lambda x: (x.access_type == 'ip' and x.access_to == instance.internal_ip), self.share.access_list())) if accesses: access = accesses[0] if access.access_level not in ('ro', 'rw'): LOG.warning("Unknown permission level {access_level} on share " "id {share_id} for ip {ip}. Leaving pre-existing " "permissions.".format( access_level=access.access_level, share_id=self.share.id, ip=instance.internal_ip)) elif access.access_level == 'ro' and access_level == 'rw': self.share.deny(access.id) self.share.allow('ip', instance.internal_ip, access_level) else: self.share.allow('ip', instance.internal_ip, access_level) @abc.abstractmethod def mount_to_instance(self, remote, share_info): """Mounts the share to the instance as configured.""" pass @abc.abstractmethod def unmount_from_instance(self, remote, share_info): """Unmounts the share from the instance.""" pass def _get_access_level(self, share_config): return share_config.get('access_level', 'rw') def _default_mount(self): return '/mnt/{0}'.format(self.share.id) def _get_path(self, share_info): return share_info.get('path', self._default_mount()) class _NFSMounter(_ShareHandler): """Handles mounting of a single NFS share to any number of instances.""" _DEBIAN_INSTALL = "dpkg -s nfs-common || apt-get -y install nfs-common" _REDHAT_INSTALL = "rpm -q nfs-utils || yum install -y nfs-utils" _NFS_CHECKS = { "centos": _REDHAT_INSTALL, "fedora": _REDHAT_INSTALL, "redhatenterpriseserver": _REDHAT_INSTALL, "redhat": _REDHAT_INSTALL, "ubuntu": _DEBIAN_INSTALL } _MKDIR_COMMAND = 'mkdir -p %s' _MOUNT_COMMAND = ("mount | grep '%(remote)s' | grep '%(local)s' | " "grep nfs || mount -t nfs %(access_arg)s %(remote)s " "%(local)s") _UNMOUNT_COMMAND = ("umount -f %s ") _RMDIR_COMMAND = 'rmdir %s' @classmethod def setup_instance(cls, remote): """Prepares an instance to mount this type of share.""" distro = remote.get_os_distrib() if distro in cls._NFS_CHECKS: command = cls._NFS_CHECKS[distro] remote.execute_command(command, run_as_root=True) else: LOG.warning("Cannot verify installation of NFS mount tools for " "unknown distro {distro}.".format(distro=distro)) def mount_to_instance(self, remote, share_info): """Mounts the share to the instance as configured.""" local_path = self._get_path(share_info) access_level = self._get_access_level(share_info) access_arg = '-w' if access_level == 'rw' else '-r' remote.execute_command(self._MKDIR_COMMAND % local_path, run_as_root=True) 
mount_command = self._MOUNT_COMMAND % { "remote": self.share.export_location, "local": local_path, "access_arg": access_arg} remote.execute_command(mount_command, run_as_root=True) def unmount_from_instance(self, remote, share_info): """Unmounts the share from the instance.""" local_path = self._get_path(share_info) unmount_command = self._UNMOUNT_COMMAND % local_path rmdir_command = self._RMDIR_COMMAND % local_path remote.execute_command(unmount_command, run_as_root=True) remote.execute_command(rmdir_command, run_as_root=True) _share_types = {"NFS": _NFSMounter} SUPPORTED_SHARE_TYPES = _share_types.keys() def make_share_path(mount_point, path): return "{0}{1}".format(mount_point, path) def default_mount(share_id): client = manila.client() return _ShareHandler.create_from_id(share_id, client)._default_mount() def get_share_path(url, shares): # url example: 'manila://ManilaShare-uuid/path_to_file' url = six.moves.urllib.parse.urlparse(url) # using list() as a python2/3 workaround share_list = list(filter(lambda s: s['id'] == url.netloc, shares)) if not share_list: # Share id is not in the share list, let the caller # determine a default path if possible path = None else: # We will always select the first one. Let the # caller determine whether duplicates are okay mount_point = share_list[0].get('path', None) # Do this in two steps instead of passing the default # expression to get(), because it's a big side effect if mount_point is None: # The situation here is that the user specified a # share without a path, so the default mnt was used # during cluster provisioning. mount_point = default_mount(share_list[0]['id']) path = make_share_path(mount_point, url.path) return path ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641486.7418911 sahara-16.0.0/sahara/service/edp/spark/0000775000175000017500000000000000000000000017647 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/spark/__init__.py0000664000175000017500000000000000000000000021746 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/spark/engine.py0000664000175000017500000004347300000000000021501 0ustar00zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation # Copyright (c) 2015 ISPRAS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
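# Added note (not part of the original modules): the shares helpers above
# resolve manila:// data-source URLs to local filesystem paths. When the
# matching share entry carries an explicit 'path', no manila API call is
# needed; otherwise the default /mnt/<share-id> mount point is derived. A
# small, hypothetical example against sahara.service.edp.shares:
#
#     from sahara.service.edp import shares
#
#     cluster_shares = [{"id": "ManilaShare-uuid", "path": "/mnt/warehouse"}]
#     shares.get_share_path("manila://ManilaShare-uuid/datasets/in.csv",
#                           cluster_shares)
#     # -> "/mnt/warehouse/datasets/in.csv"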
import os from oslo_config import cfg from oslo_utils import uuidutils from sahara import conductor as c from sahara import context from sahara import exceptions as e from sahara.i18n import _ from sahara.service.castellan import utils as key_manager from sahara.service.edp import base_engine from sahara.service.edp.job_binaries import manager as jb_manager from sahara.service.edp import job_utils from sahara.service.edp import s3_common from sahara.service.validations.edp import job_execution as j from sahara.swift import swift_helper as sw from sahara.swift import utils as su from sahara.utils import cluster as c_u from sahara.utils import edp from sahara.utils import files from sahara.utils import remote from sahara.utils import xmlutils conductor = c.API CONF = cfg.CONF class SparkJobEngine(base_engine.JobEngine): def __init__(self, cluster): self.cluster = cluster # We'll always run the driver program on the master self.master = None # These parameters depend on engine that is used self.plugin_params = {"master": "", "spark-user": "", "deploy-mode": "", "spark-submit": "", "driver-class-path": "", } def _get_pid_and_inst_id(self, job_id): try: pid, inst_id = job_id.split("@", 1) if pid and inst_id: return (pid, inst_id) except Exception: pass return "", "" def _get_instance_if_running(self, job_execution): pid, inst_id = self._get_pid_and_inst_id(job_execution.engine_job_id) if not pid or not inst_id or ( job_execution.info['status'] in edp.JOB_STATUSES_TERMINATED): return None, None # TODO(tmckay): well, if there is a list index out of range # error here it probably means that the instance is gone. If we # have a job execution that is not terminated, and the instance # is gone, we should probably change the status somehow. # For now, do nothing. try: instance = c_u.get_instances(self.cluster, [inst_id])[0] except Exception: instance = None return pid, instance def _get_result_file(self, r, job_execution): result = os.path.join(job_execution.extra['spark-path'], "result") return r.execute_command("cat %s" % result, raise_when_error=False) def _check_pid(self, r, pid): ret, stdout = r.execute_command("ps hp %s" % pid, raise_when_error=False) return ret def _get_job_status_from_remote(self, r, pid, job_execution): # If the pid is there, it's still running if self._check_pid(r, pid) == 0: return {"status": edp.JOB_STATUS_RUNNING} # The process ended. 
Look in the result file to get the exit status ret, stdout = self._get_result_file(r, job_execution) if ret == 0: exit_status = stdout.strip() if exit_status == "0": return {"status": edp.JOB_STATUS_SUCCEEDED} # SIGINT will yield either -2 or 130 elif exit_status in ["-2", "130"]: return {"status": edp.JOB_STATUS_KILLED} # Well, process is done and result is missing or unexpected return {"status": edp.JOB_STATUS_DONEWITHERROR} def _job_script(self, python_version): path = "service/edp/resources/launch_command.py" return files.get_file_text(path).replace( '{{PYTHON_VERSION}}', python_version) def _upload_wrapper_xml(self, where, job_dir, job_configs): xml_name = 'spark.xml' proxy_configs = job_configs.get('proxy_configs') configs = {} cfgs = job_configs.get('configs', {}) if proxy_configs: configs[sw.HADOOP_SWIFT_USERNAME] = proxy_configs.get( 'proxy_username') configs[sw.HADOOP_SWIFT_PASSWORD] = key_manager.get_secret( proxy_configs.get('proxy_password')) configs[sw.HADOOP_SWIFT_TRUST_ID] = proxy_configs.get( 'proxy_trust_id') configs[sw.HADOOP_SWIFT_DOMAIN_NAME] = CONF.proxy_user_domain_name else: targets = [sw.HADOOP_SWIFT_USERNAME] configs = {k: cfgs[k] for k in targets if k in cfgs} if sw.HADOOP_SWIFT_PASSWORD in cfgs: configs[sw.HADOOP_SWIFT_PASSWORD] = ( key_manager.get_secret(cfgs[sw.HADOOP_SWIFT_PASSWORD]) ) for s3_cfg_key in s3_common.S3_DS_CONFIGS: if s3_cfg_key in cfgs: if s3_cfg_key == s3_common.S3_SECRET_KEY_CONFIG: configs[s3_cfg_key] = ( key_manager.get_secret(cfgs[s3_cfg_key]) ) else: configs[s3_cfg_key] = cfgs[s3_cfg_key] content = xmlutils.create_hadoop_xml(configs) with remote.get_remote(where) as r: dst = os.path.join(job_dir, xml_name) r.write_file_to(dst, content) return xml_name def _prepare_job_binaries(self, job_binaries, r): for jb in job_binaries: jb_manager.JOB_BINARIES.get_job_binary_by_url(jb.url). \ prepare_cluster(jb, remote=r) def _upload_job_files(self, where, job_dir, job, job_configs): def upload(r, dir, job_file, proxy_configs): path = jb_manager.JOB_BINARIES. \ get_job_binary_by_url(job_file.url). 
\ copy_binary_to_cluster(job_file, proxy_configs=proxy_configs, remote=r, context=context.ctx()) return path def upload_builtin(r, dir, builtin): dst = os.path.join(dir, builtin['name']) r.write_file_to(dst, builtin['raw']) return dst builtin_libs = [] if edp.is_adapt_spark_for_swift_enabled( job_configs.get('configs', {})): path = 'service/edp/resources/edp-spark-wrapper.jar' name = 'builtin-%s.jar' % uuidutils.generate_uuid() builtin_libs = [{'raw': files.try_get_file_text(path), 'name': name}] uploaded_paths = [] builtin_paths = [] with remote.get_remote(where) as r: mains = list(job.mains) if job.mains else [] libs = list(job.libs) if job.libs else [] job_binaries = mains + libs self._prepare_job_binaries(job_binaries, r) for job_file in job_binaries: uploaded_paths.append( upload(r, job_dir, job_file, job_configs.get('proxy_configs'))) for builtin in builtin_libs: builtin_paths.append( upload_builtin(r, job_dir, builtin)) return uploaded_paths, builtin_paths def _check_driver_class_path(self, job_configs, param_dict, wf_dir): overridden = edp.spark_driver_classpath( job_configs.get('configs', {})) if overridden: param_dict['driver-class-path'] = ( " --driver-class-path " + overridden) return if not param_dict.get('wrapper_jar'): # no need in driver classpath if swift as datasource is not used param_dict['driver-class-path'] = "" return cp = param_dict['driver-class-path'] or "" if param_dict['deploy-mode'] == 'client' and not ( cp.startswith(":") or cp.endswith(":")): cp += ":" + wf_dir param_dict['driver-class-path'] = " --driver-class-path " + cp def cancel_job(self, job_execution): pid, instance = self._get_instance_if_running(job_execution) if instance is not None: with remote.get_remote(instance) as r: ret, stdout = r.execute_command("kill -SIGINT %s" % pid, raise_when_error=False) if ret == 0: # We had some effect, check the status return self._get_job_status_from_remote(r, pid, job_execution) def get_job_status(self, job_execution): pid, instance = self._get_instance_if_running(job_execution) if instance is not None: with remote.get_remote(instance) as r: return self._get_job_status_from_remote(r, pid, job_execution) def _build_command(self, wf_dir, paths, builtin_paths, updated_job_configs): indep_params = {} # TODO(tmckay): for now, paths[0] is always assumed to be the app # jar and we generate paths in order (mains, then libs). # When we have a Spark job type, we can require a "main" and set # the app jar explicitly to be "main" indep_params["app_jar"] = paths.pop(0) indep_params["job_class"] = ( updated_job_configs["configs"]["edp.java.main_class"]) if self.plugin_params.get('drivers-to-jars', None): paths.extend(self.plugin_params['drivers-to-jars']) # If we uploaded builtins then we are using a wrapper jar. 
It will # be the first one on the builtin list and the original app_jar needs # to be added to the 'additional' jars if builtin_paths: indep_params["wrapper_jar"] = builtin_paths.pop(0) indep_params["wrapper_class"] = ( 'org.openstack.sahara.edp.SparkWrapper') wrapper_xml = self._upload_wrapper_xml(self.master, wf_dir, updated_job_configs) indep_params["wrapper_args"] = "%s %s" % ( wrapper_xml, indep_params["job_class"]) indep_params["addnl_files"] = wrapper_xml indep_params["addnl_jars"] = ",".join( [indep_params["wrapper_jar"]] + paths + builtin_paths) else: indep_params["addnl_jars"] = ",".join(paths) # All additional jars are passed with the --jars option if indep_params["addnl_jars"]: indep_params["addnl_jars"] = ( " --jars " + indep_params["addnl_jars"]) # Launch the spark job using spark-submit and deploy_mode = client # TODO(tmckay): we need to clean up wf_dirs on long running clusters # TODO(tmckay): probably allow for general options to spark-submit indep_params["args"] = updated_job_configs.get('args', []) indep_params["args"] = " ".join([su.inject_swift_url_suffix(arg) for arg in indep_params["args"]]) if indep_params.get("args"): indep_params["args"] = (" " + indep_params["args"]) mutual_dict = self.plugin_params.copy() mutual_dict.update(indep_params) # Handle driver classpath. Because of the way the hadoop # configuration is handled in the wrapper class, using # wrapper_xml, the working directory must be on the classpath self._check_driver_class_path(updated_job_configs, mutual_dict, wf_dir) if mutual_dict.get("wrapper_jar"): # Substrings which may be empty have spaces # embedded if they are non-empty cmd = ( '%(spark-user)s%(spark-submit)s%(driver-class-path)s' ' --files %(addnl_files)s' ' --class %(wrapper_class)s%(addnl_jars)s' ' --master %(master)s' ' --deploy-mode %(deploy-mode)s' ' %(app_jar)s %(wrapper_args)s%(args)s') % dict( mutual_dict) else: cmd = ( '%(spark-user)s%(spark-submit)s%(driver-class-path)s' ' --class %(job_class)s%(addnl_jars)s' ' --master %(master)s' ' --deploy-mode %(deploy-mode)s' ' %(app_jar)s%(args)s') % dict( mutual_dict) return cmd def run_job(self, job_execution): ctx = context.ctx() job = conductor.job_get(ctx, job_execution.job_id) # This will be a dictionary of tuples, (native_url, runtime_url) # keyed by data_source id data_source_urls = {} additional_sources, updated_job_configs = ( job_utils.resolve_data_source_references(job_execution.job_configs, job_execution.id, data_source_urls, self.cluster) ) job_execution = conductor.job_execution_update( ctx, job_execution, {"data_source_urls": job_utils.to_url_dict(data_source_urls)}) # Now that we've recorded the native urls, we can switch to the # runtime urls data_source_urls = job_utils.to_url_dict(data_source_urls, runtime=True) job_utils.prepare_cluster_for_ds(additional_sources, self.cluster, updated_job_configs, data_source_urls) # It is needed in case we are working with Spark plugin self.plugin_params['master'] = ( self.plugin_params['master'] % {'host': self.master.hostname()}) # TODO(tmckay): wf_dir should probably be configurable. 
# The only requirement is that the dir is writable by the image user wf_dir = job_utils.create_workflow_dir(self.master, '/tmp/spark-edp', job, job_execution.id, "700") paths, builtin_paths = self._upload_job_files( self.master, wf_dir, job, updated_job_configs) # We can shorten the paths in this case since we'll run out of wf_dir paths = [os.path.basename(p) if p.startswith(wf_dir) else p for p in paths] builtin_paths = [os.path.basename(p) for p in builtin_paths] cmd = self._build_command(wf_dir, paths, builtin_paths, updated_job_configs) job_execution = conductor.job_execution_get(ctx, job_execution.id) if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED: return (None, edp.JOB_STATUS_KILLED, None) # If an exception is raised here, the job_manager will mark # the job failed and log the exception # The redirects of stdout and stderr will preserve output in the wf_dir with remote.get_remote(self.master) as r: # Upload the command launch script launch = os.path.join(wf_dir, "launch_command") python_version = r.get_python_version() r.write_file_to(launch, self._job_script(python_version)) r.execute_command("chmod u+rwx,g+rx,o+rx %s" % wf_dir) r.execute_command("chmod +x %s" % launch) ret, stdout = r.execute_command( "cd %s; ./launch_command %s > /dev/null 2>&1 & echo $!" % (wf_dir, cmd)) if ret == 0: # Success, we'll add the wf_dir in job_execution.extra and store # pid@instance_id as the job id # We know the job is running so return "RUNNING" return (stdout.strip() + "@" + self.master.id, edp.JOB_STATUS_RUNNING, {'spark-path': wf_dir}) # Hmm, no execption but something failed. # Since we're using backgrounding with redirect, this is unlikely. raise e.EDPError(_("Spark job execution failed. Exit status = " "%(status)s, stdout = %(stdout)s") % {'status': ret, 'stdout': stdout}) def run_scheduled_job(self, job_execution): raise e.NotImplementedException(_("Currently Spark engine does not" " support scheduled EDP jobs")) def validate_job_execution(self, cluster, job, data): j.check_main_class_present(data, job) @staticmethod def get_possible_job_config(job_type): return {'job_config': {'configs': [], 'args': []}} @staticmethod def get_supported_job_types(): return [edp.JOB_TYPE_SPARK] class SparkShellJobEngine(SparkJobEngine): def _build_command(self, wf_dir, paths, builtin_paths, updated_job_configs): main_script = paths.pop(0) args = " ".join(updated_job_configs.get('args', [])) env_params = "" params = updated_job_configs.get('params', {}) for key, value in params.items(): env_params += "{key}={value} ".format(key=key, value=value) cmd = ("{env_params}{cmd} {main_script} {args}".format( cmd='/bin/sh', main_script=main_script, env_params=env_params, args=args)) return cmd def validate_job_execution(self, cluster, job, data): # Shell job doesn't require any special validation pass @staticmethod def get_possible_job_config(job_type): return {'job_config': {'configs': {}, 'args': [], 'params': {}}} @staticmethod def get_supported_job_types(): return [edp.JOB_TYPE_SHELL] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641486.7418911 sahara-16.0.0/sahara/service/edp/storm/0000775000175000017500000000000000000000000017673 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/storm/__init__.py0000664000175000017500000000000000000000000021772 
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/storm/engine.py0000664000175000017500000003074200000000000021520 0ustar00zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os from oslo_config import cfg from oslo_utils import uuidutils from sahara import conductor as c from sahara import context from sahara import exceptions as e from sahara.i18n import _ from sahara.plugins import utils as plugin_utils from sahara.service.edp import base_engine from sahara.service.edp.job_binaries import manager as jb_manager from sahara.service.edp import job_utils from sahara.service.validations.edp import job_execution as j from sahara.utils import cluster as cluster_utils from sahara.utils import edp from sahara.utils import files from sahara.utils import remote conductor = c.API CONF = cfg.CONF class StormJobEngine(base_engine.JobEngine): def __init__(self, cluster): self.cluster = cluster def _get_topology_and_inst_id(self, job_id): try: topology_name, inst_id = job_id.split("@", 1) if topology_name and inst_id: return (topology_name, inst_id) except Exception: pass return "", "" def _get_instance_if_running(self, job_execution): topology_name, inst_id = self._get_topology_and_inst_id( job_execution.engine_job_id) if not topology_name or not inst_id or ( job_execution.info['status'] in edp.JOB_STATUSES_TERMINATED): return None, None # TODO(tmckay): well, if there is a list index out of range # error here it probably means that the instance is gone. If we # have a job execution that is not terminated, and the instance # is gone, we should probably change the status somehow. # For now, do nothing. 
try: instance = cluster_utils.get_instances(self.cluster, [inst_id])[0] except Exception: instance = None return topology_name, instance def _get_topology_name(self, job_execution): topology_name, inst_id = self._get_topology_and_inst_id( job_execution.engine_job_id) return topology_name def _set_topology_name(self, job_execution, name): return self._generate_topology_name(name) def _generate_topology_name(self, name): return name + "_" + uuidutils.generate_uuid() def _get_job_status_from_remote(self, job_execution, retries=3): topology_name, inst_id = self._get_instance_if_running( job_execution) if topology_name is None or inst_id is None: return edp.JOB_STATUSES_TERMINATED topology_name = self._get_topology_name(job_execution) master = plugin_utils.get_instance(self.cluster, "nimbus") cmd = ( "%(storm)s -c nimbus.host=%(host)s " "list | grep %(topology_name)s | awk '{print $2}'") % ( { "storm": "/usr/local/storm/bin/storm", "host": master.hostname(), "topology_name": topology_name }) for i in range(retries): with remote.get_remote(master) as r: ret, stdout = r.execute_command("%s " % (cmd)) # If the status is ACTIVE is there, it's still running if stdout.strip() == "ACTIVE": return {"status": edp.JOB_STATUS_RUNNING} else: if i == retries - 1: return {"status": edp.JOB_STATUS_KILLED} context.sleep(10) def _job_script(self, python_version): path = "service/edp/resources/launch_command.py" return files.get_file_text(path).replace( '{{PYTHON_VERSION}}', python_version) def _prepare_job_binaries(self, job_binaries, r): for jb in job_binaries: jb_manager.JOB_BINARIES.get_job_binary_by_url(jb.url). \ prepare_cluster(jb, remote=r) def _upload_job_files(self, where, job_dir, job, job_configs): def upload(r, dir, job_file, proxy_configs): path = jb_manager.JOB_BINARIES. \ get_job_binary_by_url(job_file.url). \ copy_binary_to_cluster(job_file, proxy_configs=proxy_configs, remote=r, context=context.ctx()) return path uploaded_paths = [] with remote.get_remote(where) as r: mains = list(job.mains) if job.mains else [] libs = list(job.libs) if job.libs else [] job_binaries = mains + libs self._prepare_job_binaries(job_binaries, r) for job_file in job_binaries: uploaded_paths.append( upload(r, job_dir, job_file, job_configs.get('proxy_configs'))) return uploaded_paths def _exec_cmd_on_remote_instance(self, master, cmd): if master is not None: with remote.get_remote(master) as r: ret, stdout = r.execute_command("%s > /dev/null 2>&1 & echo $!" 
% cmd) return ret, stdout def cancel_job(self, job_execution): topology_name, instance = self._get_instance_if_running(job_execution) if topology_name is None or instance is None: return None topology_name = self._get_topology_name(job_execution) master = plugin_utils.get_instance(self.cluster, "nimbus") cmd = ( '%(storm_kill)s -c nimbus.host=%(host)s %(topology_name)s') % ( { "storm_kill": "/usr/local/storm/bin/storm kill", "host": master.hostname(), "topology_name": topology_name }) ret, stdout = self._exec_cmd_on_remote_instance(instance, cmd) if ret == 0: # We had some effect, check the status return self._get_job_status_from_remote(job_execution) def get_job_status(self, job_execution): topology_name, instance = self._get_instance_if_running(job_execution) if instance is not None: return self._get_job_status_from_remote(job_execution, retries=3) def _execute_remote_job(self, master, wf_dir, cmd): # If an exception is raised here, the job_manager will mark # the job failed and log the exception # The redirects of stdout and stderr will preserve output in the wf_dir with remote.get_remote(master) as r: # Upload the command launch script launch = os.path.join(wf_dir, "launch_command") python_version = r.get_python_version() r.write_file_to(launch, self._job_script(python_version)) r.execute_command("chmod +x %s" % launch) ret, stdout = r.execute_command( "cd %s; ./launch_command %s > /dev/null 2>&1 & echo $!" % (wf_dir, cmd)) return ret, stdout def _build_command(self, paths, updated_job_configs, host, topology_name): app_jar = paths.pop(0) job_class = updated_job_configs["configs"]["edp.java.main_class"] args = updated_job_configs.get('args', []) args = " ".join([arg for arg in args]) if args: args = " " + args cmd = ( '%(storm_jar)s -c nimbus.host=%(host)s %(job_jar)s ' '%(main_class)s %(topology_name)s%(args)s' % ( { "storm_jar": "/usr/local/storm/bin/storm jar", "main_class": job_class, "job_jar": app_jar, "host": host, "topology_name": topology_name, "args": args })) return cmd def run_job(self, job_execution): ctx = context.ctx() job = conductor.job_get(ctx, job_execution.job_id) # This will be a dictionary of tuples, (native_url, runtime_url) # keyed by data_source id data_source_urls = {} additional_sources, updated_job_configs = ( job_utils.resolve_data_source_references(job_execution.job_configs, job_execution.id, data_source_urls, self.cluster) ) job_execution = conductor.job_execution_update( ctx, job_execution, {"data_source_urls": job_utils.to_url_dict(data_source_urls)}) # Now that we've recorded the native urls, we can switch to the # runtime urls data_source_urls = job_utils.to_url_dict(data_source_urls, runtime=True) job_utils.prepare_cluster_for_ds(additional_sources, self.cluster, updated_job_configs, data_source_urls) # We'll always run the driver program on the master master = plugin_utils.get_instance(self.cluster, "nimbus") # TODO(tmckay): wf_dir should probably be configurable. 
# The only requirement is that the dir is writable by the image user wf_dir = job_utils.create_workflow_dir(master, '/tmp/storm-edp', job, job_execution.id, "700") paths = self._upload_job_files(master, wf_dir, job, updated_job_configs) topology_name = self._set_topology_name(job_execution, job.name) # Launch the storm job using storm jar host = master.hostname() cmd = self._build_command(paths, updated_job_configs, host, topology_name) job_execution = conductor.job_execution_get(ctx, job_execution.id) if job_execution.info['status'] == edp.JOB_STATUS_TOBEKILLED: return (None, edp.JOB_STATUS_KILLED, None) ret, stdout = self._execute_remote_job(master, wf_dir, cmd) if ret == 0: # Success, we'll add the wf_dir in job_execution.extra and store # topology_name@instance_id as the job id # We know the job is running so return "RUNNING" return (topology_name + "@" + master.id, edp.JOB_STATUS_RUNNING, {'storm-path': wf_dir}) # Hmm, no execption but something failed. # Since we're using backgrounding with redirect, this is unlikely. raise e.EDPError(_("Storm job execution failed. Exit status = " "%(status)s, stdout = %(stdout)s") % {'status': ret, 'stdout': stdout}) def run_scheduled_job(self, job_execution): raise e.NotImplementedException(_("Currently Storm engine does not" " support scheduled EDP jobs")) def validate_job_execution(self, cluster, job, data): j.check_main_class_present(data, job) @staticmethod def get_possible_job_config(job_type): return {'job_config': {'configs': [], 'args': []}} @staticmethod def get_supported_job_types(): return [edp.JOB_TYPE_STORM] class StormPyleusJobEngine(StormJobEngine): def _build_command(self, paths, updated_job_configs, host, topology_name): jar_file = paths.pop(0) cmd = ("{pyleus} -n {nimbus_host} {jar_file}").format( pyleus='pyleus submit', nimbus_host=host, jar_file=jar_file) return cmd def validate_job_execution(self, cluster, job, data): j.check_topology_name_present(data, job) def _set_topology_name(self, job_execution, name): topology_name = job_execution["configs"]["topology_name"] return topology_name def _execute_remote_job(self, master, wf_dir, cmd): with remote.get_remote(master) as r: ret, stdout = r.execute_command( "cd %s; %s > /dev/null 2>&1 & echo $!" % (wf_dir, cmd)) return ret, stdout @staticmethod def get_possible_job_config(job_type): return {'job_config': {'configs': [], 'args': []}} @staticmethod def get_supported_job_types(): return [edp.JOB_TYPE_PYLEUS] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641486.7418911 sahara-16.0.0/sahara/service/edp/utils/0000775000175000017500000000000000000000000017667 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/utils/__init__.py0000664000175000017500000000000000000000000021766 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/edp/utils/shares.py0000664000175000017500000002645200000000000021537 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import collections import itertools from oslo_log import log import six from sahara import context from sahara.utils.openstack import manila LOG = log.getLogger(__name__) def mount_shares(cluster): """Mounts all shares specified for the cluster and any of its node groups. - In the event that a specific share is configured for both the cluster and a specific node group, configuration at the node group level will be ignored. - In the event that utilities required to mount the share are not already installed on the node, this method may fail if the node cannot access the internet. - This method will not remove already-mounted shares. - This method will not remove or remount (or currently, reconfigure) shares already mounted to the desired local mount point. :param cluster: The cluster model. """ node_groups = (ng for ng in cluster.node_groups if ng.shares) ng_mounts = [_mount(ng, share_config) for ng in node_groups for share_config in ng.shares] c_mounts = [_mount(ng, share_config) for ng in cluster.node_groups for share_config in cluster.shares or []] if not (ng_mounts or c_mounts): return ng_mounts_by_share_id = _group_mounts_by_share_id(ng_mounts) c_mounts_by_share_id = _group_mounts_by_share_id(c_mounts) all_share_ids = (set(ng_mounts_by_share_id.keys()) | set(c_mounts_by_share_id.keys())) mounts_by_share_id = { share_id: c_mounts_by_share_id.get(share_id) or ng_mounts_by_share_id[share_id] for share_id in all_share_ids} all_mounts = itertools.chain(*mounts_by_share_id.values()) mounts_by_ng_id = _group_mounts_by_ng_id(all_mounts) client = manila.client() handlers_by_share_id = {id: _ShareHandler.create_from_id(id, client) for id in all_share_ids} for mounts in mounts_by_ng_id.values(): node_group_shares = _NodeGroupShares(mounts[0].node_group) for mount in mounts: share_id = mount.share_config['id'] node_group_shares.add_share(mount.share_config, handlers_by_share_id[share_id]) node_group_shares.mount_shares_to_node_group() def unmount_shares(cluster, unmount_share_list): """Unmounts all shares in unmount_share_list on the given cluster :param cluster: The cluster model. 
:param unmount_share_list: list of shares to unmount """ client = manila.client() unmount_share_ids = (set(s['id'] for s in unmount_share_list)) handlers_by_share_id = {id: _ShareHandler.create_from_id(id, client) for id in unmount_share_ids} for share in unmount_share_list: for ng in cluster.node_groups: for instance in ng.instances: handlers_by_share_id[share['id']].unmount_from_instance( instance.remote(), share) _mount = collections.namedtuple('Mount', ['node_group', 'share_config']) def _group_mounts(mounts, grouper): result = collections.defaultdict(list) for mount in mounts: result[grouper(mount)].append(mount) return result def _group_mounts_by_share_id(mounts): return _group_mounts(mounts, lambda mount: mount.share_config['id']) def _group_mounts_by_ng_id(mounts): return _group_mounts(mounts, lambda mount: mount.node_group['id']) class _NodeGroupShares(object): """Organizes share mounting for a single node group.""" _share = collections.namedtuple('Share', ['share_config', 'handler']) def __init__(self, node_group): self.node_group = node_group self.shares = [] def add_share(self, share_config, handler): """Adds a share to mount; add all shares before mounting.""" self.shares.append(self._share(share_config, handler)) def mount_shares_to_node_group(self): """Mounts all configured shares to the node group.""" for instance in self.node_group.instances: with context.set_current_instance_id(instance.instance_id): self._mount_shares_to_instance(instance) def _mount_shares_to_instance(self, instance): # Note: Additional iteration here is critical: based on # experimentation, failure to execute allow_access before spawning # the remote results in permission failure. for share in self.shares: share.handler.allow_access_to_instance(instance, share.share_config) with instance.remote() as remote: share_types = set(type(share.handler) for share in self.shares) for share_type in share_types: share_type.setup_instance(remote) for share in self.shares: share.handler.mount_to_instance(remote, share.share_config) @six.add_metaclass(abc.ABCMeta) class _ShareHandler(object): """Handles mounting of a single share to any number of instances.""" @classmethod def setup_instance(cls, remote): """Prepares an instance to mount this type of share.""" pass @classmethod def create_from_id(cls, share_id, client): """Factory method for creation from a share_id of unknown type.""" share = manila.get_share(client, share_id, raise_on_error=True) mounter_class = _share_types[share.share_proto] return mounter_class(share, client) def __init__(self, share, client): self.share = share self.client = client def allow_access_to_instance(self, instance, share_config): """Mounts a specific share to a specific instance.""" access_level = self._get_access_level(share_config) accesses = list(filter(lambda x: (x.access_type == 'ip' and x.access_to == instance.internal_ip), self.share.access_list())) if accesses: access = accesses[0] if access.access_level not in ('ro', 'rw'): LOG.warning("Unknown permission level {access_level} on share " "id {share_id} for ip {ip}. 
Leaving pre-existing " "permissions.".format( access_level=access.access_level, share_id=self.share.id, ip=instance.internal_ip)) elif access.access_level == 'ro' and access_level == 'rw': self.share.deny(access.id) self.share.allow('ip', instance.internal_ip, access_level) else: self.share.allow('ip', instance.internal_ip, access_level) @abc.abstractmethod def mount_to_instance(self, remote, share_info): """Mounts the share to the instance as configured.""" pass @abc.abstractmethod def unmount_from_instance(self, remote, share_info): """Unmounts the share from the instance.""" pass def _get_access_level(self, share_config): return share_config.get('access_level', 'rw') def _default_mount(self): return '/mnt/{0}'.format(self.share.id) def _get_path(self, share_info): return share_info.get('path', self._default_mount()) class _NFSMounter(_ShareHandler): """Handles mounting of a single NFS share to any number of instances.""" _DEBIAN_INSTALL = "dpkg -s nfs-common || apt-get -y install nfs-common" _REDHAT_INSTALL = "rpm -q nfs-utils || yum install -y nfs-utils" _NFS_CHECKS = { "centos": _REDHAT_INSTALL, "fedora": _REDHAT_INSTALL, "redhatenterpriseserver": _REDHAT_INSTALL, "redhat": _REDHAT_INSTALL, "ubuntu": _DEBIAN_INSTALL } _MKDIR_COMMAND = 'mkdir -p %s' _MOUNT_COMMAND = ("mount | grep '%(remote)s' | grep '%(local)s' | " "grep nfs || mount -t nfs %(access_arg)s %(remote)s " "%(local)s") _UNMOUNT_COMMAND = ("umount -f %s ") _RMDIR_COMMAND = 'rmdir %s' @classmethod def setup_instance(cls, remote): """Prepares an instance to mount this type of share.""" distro = remote.get_os_distrib() if distro in cls._NFS_CHECKS: command = cls._NFS_CHECKS[distro] remote.execute_command(command, run_as_root=True) else: LOG.warning("Cannot verify installation of NFS mount tools for " "unknown distro {distro}.".format(distro=distro)) def mount_to_instance(self, remote, share_info): """Mounts the share to the instance as configured.""" local_path = self._get_path(share_info) access_level = self._get_access_level(share_info) access_arg = '-w' if access_level == 'rw' else '-r' remote.execute_command(self._MKDIR_COMMAND % local_path, run_as_root=True) mount_command = self._MOUNT_COMMAND % { "remote": self.share.export_location, "local": local_path, "access_arg": access_arg} remote.execute_command(mount_command, run_as_root=True) def unmount_from_instance(self, remote, share_info): """Unmounts the share from the instance.""" local_path = self._get_path(share_info) unmount_command = self._UNMOUNT_COMMAND % local_path rmdir_command = self._RMDIR_COMMAND % local_path remote.execute_command(unmount_command, run_as_root=True) remote.execute_command(rmdir_command, run_as_root=True) _share_types = {"NFS": _NFSMounter} SUPPORTED_SHARE_TYPES = _share_types.keys() def make_share_path(mount_point, path): return "{0}{1}".format(mount_point, path) def default_mount(share_id): client = manila.client() return _ShareHandler.create_from_id(share_id, client)._default_mount() def get_share_path(url, shares): # url example: 'manila://ManilaShare-uuid/path_to_file' url = six.moves.urllib.parse.urlparse(url) # using list() as a python2/3 workaround share_list = list(filter(lambda s: s['id'] == url.netloc, shares)) if not share_list: # Share id is not in the share list, let the caller # determine a default path if possible path = None else: # We will always select the first one. 
Let the # caller determine whether duplicates are okay mount_point = share_list[0].get('path', None) # Do this in two steps instead of passing the default # expression to get(), because it's a big side effect if mount_point is None: # The situation here is that the user specified a # share without a path, so the default mnt was used # during cluster provisioning. mount_point = default_mount(share_list[0]['id']) path = make_share_path(mount_point, url.path) return path ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/engine.py0000664000175000017500000002116600000000000017604 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import datetime import string from oslo_config import cfg from oslo_log import log as logging import six from sahara import conductor as c from sahara import context from sahara.i18n import _ from sahara.service import networks from sahara.utils import cluster as cluster_utils from sahara.utils import cluster_progress_ops as cpo from sahara.utils import edp from sahara.utils.openstack import base as b from sahara.utils.openstack import images as sahara_images from sahara.utils import poll_utils from sahara.utils import remote LOG = logging.getLogger(__name__) conductor = c.API CONF = cfg.CONF @six.add_metaclass(abc.ABCMeta) class Engine(object): @abc.abstractmethod def create_cluster(self, cluster): pass @abc.abstractmethod def scale_cluster(self, cluster, node_group_id_map): pass @abc.abstractmethod def shutdown_cluster(self, cluster, force): pass @abc.abstractmethod def rollback_cluster(self, cluster, reason): pass @abc.abstractmethod def get_type_and_version(self): """Returns engine type and version Result should be in the form 'type.major.minor'. 
""" def get_node_group_image_username(self, node_group): image_id = node_group.get_image_id() return b.execute_with_retries( sahara_images.image_manager().get, image_id).username @poll_utils.poll_status('ips_assign_timeout', _("Assign IPs"), sleep=1) def _ips_assign(self, ips_assigned, cluster, instances): if not cluster_utils.check_cluster_exists(cluster): return True for instance in instances: if instance.id not in ips_assigned: with context.set_current_instance_id(instance.instance_id): if networks.init_instances_ips(instance): ips_assigned.add(instance.id) cpo.add_successful_event(instance) return len(ips_assigned) == len(instances) def _await_networks(self, cluster, instances): if not instances: return cpo.add_provisioning_step(cluster.id, _("Assign IPs"), len(instances)) ips_assigned = set() self._ips_assign(ips_assigned, cluster, instances) LOG.info("All instances have IPs assigned") cluster = conductor.cluster_get(context.ctx(), cluster) instances = cluster_utils.get_instances(cluster, ips_assigned) cpo.add_provisioning_step( cluster.id, _("Wait for instance accessibility"), len(instances)) with context.ThreadGroup() as tg: for instance in instances: with context.set_current_instance_id(instance.instance_id): tg.spawn("wait-for-ssh-%s" % instance.instance_name, self._wait_until_accessible, instance) LOG.info("All instances are accessible") @poll_utils.poll_status( 'wait_until_accessible', _("Wait for instance accessibility"), sleep=5) def _is_accessible(self, instance): if not cluster_utils.check_cluster_exists(instance.cluster): return True try: # check if ssh is accessible and cloud-init # script is finished generating authorized_keys exit_code, stdout = instance.remote().execute_command( "ls .ssh/authorized_keys", raise_when_error=False) if exit_code == 0: LOG.debug('Instance is accessible') return True except Exception as ex: ip_used = "internal_ip" if CONF.proxy_command and \ CONF.proxy_command_use_internal_ip else "management_ip" LOG.debug("Can't login to node, IP: {ip}, reason {reason}" .format(ip=getattr(instance, ip_used), reason=ex)) return False return False @cpo.event_wrapper(mark_successful_on_exit=True) def _wait_until_accessible(self, instance): self._is_accessible(instance) def _configure_instances(self, cluster): """Configure active instances. * generate /etc/hosts * change /etc/resolv.conf * setup passwordless login * etc. 
""" cpo.add_provisioning_step( cluster.id, _("Configure instances"), cluster_utils.count_instances(cluster)) with context.ThreadGroup() as tg: for node_group in cluster.node_groups: for instance in node_group.instances: with context.set_current_instance_id(instance.instance_id): tg.spawn("configure-instance-{}".format( instance.instance_name), self._configure_instance, instance, cluster ) @cpo.event_wrapper(mark_successful_on_exit=True) def _configure_instance(self, instance, cluster): self._configure_instance_etc_hosts(instance, cluster) if cluster.use_designate_feature(): self._configure_instance_resolve_conf(instance) def _configure_instance_etc_hosts(self, instance, cluster): LOG.debug('Configuring "/etc/hosts" of instance.') hosts_file = cluster_utils.generate_etc_hosts(cluster) hostname = instance.fqdn() with instance.remote() as r: r.write_file_to('etc-hosts', hosts_file) r.write_file_to('etc-hostname', hostname) r.execute_command('sudo hostname %s' % hostname) r.execute_command('sudo cp etc-hosts /etc/hosts') r.execute_command('sudo cp etc-hostname /etc/hostname') r.execute_command('sudo rm etc-hosts etc-hostname') r.execute_command('sudo usermod -s /bin/bash $USER') def _configure_instance_resolve_conf(self, instance): LOG.debug('Setting up those name servers from sahara.conf ' 'which are lacked in the /etc/resolv.conf.') with instance.remote() as r: code, curr_resolv_conf = r.execute_command('cat /etc/resolv.conf') diff = cluster_utils.generate_resolv_conf_diff(curr_resolv_conf) if diff.strip(): position = curr_resolv_conf.find('nameserver') if position == -1: position = 0 new_resolv_conf = "{}\n{}{}".format( curr_resolv_conf[:position], diff, curr_resolv_conf[position:]) r.write_file_to('resolv-conf', new_resolv_conf) r.execute_command('sudo mv resolv-conf /etc/resolv.conf') def _generate_user_data_script(self, node_group, instance_name): script = """#!/bin/bash echo "${public_key}" >> ${user_home}/.ssh/authorized_keys\n # ====== COMMENT OUT Defaults requiretty in /etc/sudoers ======== sed '/^Defaults requiretty*/ s/^/#/' -i /etc/sudoers\n """ script += remote.get_userdata_template() username = node_group.image_username if username == "root": user_home = "/root/" else: user_home = "/home/%s/" % username script_template = string.Template(script) return script_template.safe_substitute( public_key=node_group.cluster.management_public_key, user_home=user_home, instance_name=instance_name) # Deletion ops def _clean_job_executions(self, cluster): ctx = context.ctx() for je in conductor.job_execution_get_all(ctx, cluster_id=cluster.id): update = {"cluster_id": None} if not je.end_time: info = je.info.copy() if je.info else {} info['status'] = edp.JOB_STATUS_KILLED update.update({"info": info, "end_time": datetime.datetime.now()}) conductor.job_execution_update(ctx, je, update) def _remove_db_objects(self, cluster): ctx = context.ctx() cluster = conductor.cluster_get(ctx, cluster) instances = cluster_utils.get_instances(cluster) for inst in instances: conductor.instance_remove(ctx, inst) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641486.7418911 sahara-16.0.0/sahara/service/health/0000775000175000017500000000000000000000000017224 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/health/__init__.py0000664000175000017500000000000000000000000021323 
0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/health/common.py0000664000175000017500000000311000000000000021061 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg HEALTH_STATUS_GREEN = "GREEN" HEALTH_STATUS_YELLOW = "YELLOW" HEALTH_STATUS_RED = "RED" HEALTH_STATUS_CHECKING = "CHECKING" HEALTH_STATUS_DONE = [ HEALTH_STATUS_GREEN, HEALTH_STATUS_YELLOW, HEALTH_STATUS_RED, ] VERIFICATIONS_START_OPS = "START" VERIFICATIONS_OPS = [ VERIFICATIONS_START_OPS, ] CONF = cfg.CONF health_opts = [ cfg.BoolOpt('verification_enable', default=True, help="Option to enable verifications for all clusters"), cfg.IntOpt('verification_periodic_interval', default=600, help="Interval between two consecutive periodic tasks for " "verifications, in seconds."), cfg.IntOpt('verification_timeout', default=600, help="Time limit for health check function, in seconds.") ] health_opts_group = cfg.OptGroup( 'cluster_verifications', title='Options to configure verifications') CONF.register_group(group=health_opts_group) CONF.register_opts(health_opts, group=health_opts_group) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/health/verification_base.py0000664000175000017500000001212100000000000023247 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import collections from oslo_config import cfg from oslo_log import log as logging from sahara import conductor from sahara import context from sahara import exceptions from sahara.i18n import _ from sahara.plugins import health_check_base from sahara.service.health import common from sahara.utils import cluster as cluster_utils cond = conductor.API CONF = cfg.CONF LOG = logging.getLogger(__name__) class CannotVerifyError(exceptions.SaharaException): code = "CANNOT_VERIFY" message_template = _("Cannot verify cluster. 
Reason: %s") def __init__(self, reason): message = self.message_template % reason super(CannotVerifyError, self).__init__(message) def get_possible_ops(): return common.VERIFICATIONS_OPS def verification_exists(cluster): try: if cluster.verification is not None: return True except (AttributeError, KeyError): return False def validate_verification_ops(cluster, data): status = data.get('verification', {}).get('status', None) if not status: # update doesn't affect verifications return False if len(list(data.keys())) != 1: raise CannotVerifyError( _("Can't update verification with other updates")) if status == common.VERIFICATIONS_START_OPS: return validate_verification_start(cluster) def clean_verification_data(cluster): cluster = cond.cluster_get(context.ctx(), cluster) if verification_exists(cluster): try: vid = cluster.verification.id cond.cluster_verification_delete(context.ctx(), vid) except exceptions.NotFoundException: LOG.debug("Verification data already cleaned") def validate_verification_start(cluster): if not CONF.cluster_verifications.verification_enable: raise CannotVerifyError(_("All verifications are disabled")) ctx = context.ctx() cluster = cond.cluster_get(ctx, cluster) if not cluster or cluster.status != cluster_utils.CLUSTER_STATUS_ACTIVE: raise CannotVerifyError(_("Cluster is not active or doesn't exists")) if not verification_exists(cluster): return True if cluster.verification.status == common.HEALTH_STATUS_CHECKING: raise CannotVerifyError( _("Cluster verification in state %s") % common.HEALTH_STATUS_CHECKING) return True def _prepare_verification_running(ctx, cluster): if verification_exists(cluster): vid = cluster.verification.id # to delete all data related to the previous one cond.cluster_verification_delete(ctx, vid) return (cond.cluster_verification_add( ctx, cluster, {'status': common.HEALTH_STATUS_CHECKING}), cond.cluster_get(ctx, cluster)) def _execute_health_checks(ctx, cluster): health_checks = health_check_base.get_health_checks(cluster) actual = [] with context.ThreadGroup() as tg: for check in health_checks: actual_check = check(cluster) actual.append(actual_check) tg.spawn('health-check-exc', actual_check.execute) def _decide_status_for_verification(ctx, verification): ver = cond.cluster_verification_get(ctx, verification) cnt = collections.Counter() for check in ver.checks: cnt[check.status] += 1 if cnt[common.HEALTH_STATUS_GREEN] == len(ver.checks): decided_status = common.HEALTH_STATUS_GREEN elif cnt[common.HEALTH_STATUS_RED] > 0: decided_status = common.HEALTH_STATUS_RED else: decided_status = common.HEALTH_STATUS_YELLOW return cond.cluster_verification_update( context.ctx(), ver.id, {'status': decided_status}) def verification_run(cluster): ctx = context.ctx() LOG.debug("Running verification for the cluster") ver, cluster = _prepare_verification_running(ctx, cluster) _execute_health_checks(ctx, cluster) return _decide_status_for_verification(ctx, ver) def handle_verification(cluster, values): cluster = cond.cluster_get(context.ctx(), cluster) context.set_current_cluster_id(cluster.id) values = {} if not values else values status = values.get('verification', {}).get('status', None) if status == common.VERIFICATIONS_START_OPS: verification_run(cluster) def update_verification_required(values): if values.get('verification', {}).get('status', None): return True return False def get_verification_periodic_interval(): return (CONF.cluster_verifications.verification_periodic_interval if CONF.cluster_verifications.verification_enable else -1) 
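# --- Illustrative sketch, not part of the original module ---
# A minimal, self-contained rendition of the aggregation rule used by
# _decide_status_for_verification above: a verification is GREEN only when
# every health check is GREEN, RED as soon as any check is RED, and YELLOW
# otherwise. The helper name and the plain-string statuses are assumptions
# for illustration; the real code reads check objects from the conductor.
import collections

def _example_decide_status(check_statuses):
    statuses = list(check_statuses)
    cnt = collections.Counter(statuses)
    if cnt["GREEN"] == len(statuses):
        return "GREEN"
    if cnt["RED"] > 0:
        return "RED"
    return "YELLOW"

# _example_decide_status(["GREEN", "GREEN"])          -> "GREEN"
# _example_decide_status(["GREEN", "YELLOW"])         -> "YELLOW"
# _example_decide_status(["GREEN", "YELLOW", "RED"])  -> "RED"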
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1648641486.7418911 sahara-16.0.0/sahara/service/heat/0000775000175000017500000000000000000000000016700 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/heat/__init__.py0000664000175000017500000000000000000000000020777 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/heat/commons.py0000664000175000017500000000121500000000000020724 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. HEAT_ENGINE_VERSION = 'heat.3.0' HEAT_TEMPLATE_VERSION = '2016-04-08' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/heat/heat_engine.py0000664000175000017500000002436000000000000021525 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
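# --- Illustrative sketch, not part of the original tree ---
# The constants in sahara/service/heat/commons.py above are what the template
# builder stamps onto every generated stack: HEAT_TEMPLATE_VERSION becomes the
# "heat_template_version" header of each template, and HEAT_ENGINE_VERSION is
# what HeatEngine.get_type_and_version() reports. A minimal skeleton of such a
# template; the helper name is hypothetical, PyYAML is already a dependency of
# the real templates module.
import yaml

def _example_empty_template(description):
    return yaml.safe_dump({
        "heat_template_version": "2016-04-08",  # heat_common.HEAT_TEMPLATE_VERSION
        "description": description,
        "resources": {},
        "outputs": {},
    })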
from heatclient import exc as heat_exc from oslo_config import cfg from oslo_log import log as logging from sahara import conductor as c from sahara import context from sahara import exceptions as ex from sahara.i18n import _ from sahara.service import engine as e from sahara.service.heat import commons as heat_common from sahara.service.heat import templates as ht from sahara.service import volumes from sahara.utils import cluster as c_u from sahara.utils import cluster_progress_ops as cpo from sahara.utils.openstack import heat conductor = c.API CONF = cfg.CONF LOG = logging.getLogger(__name__) CREATE_STAGES = [c_u.CLUSTER_STATUS_SPAWNING, c_u.CLUSTER_STATUS_WAITING, c_u.CLUSTER_STATUS_PREPARING] SCALE_STAGES = [c_u.CLUSTER_STATUS_SCALING_SPAWNING, c_u.CLUSTER_STATUS_SCALING_WAITING, c_u.CLUSTER_STATUS_SCALING_PREPARING] ROLLBACK_STAGES = [c_u.CLUSTER_STATUS_ROLLBACK_SPAWNING, c_u.CLUSTER_STATUS_ROLLBACK_WAITING, c_u.CLUSTER_STATUS_ROLLBACK__PREPARING] heat_engine_opts = [ cfg.ListOpt('heat_stack_tags', default=['data-processing-cluster'], help="List of tags to be used during operating with stack.") ] CONF.register_opts(heat_engine_opts) class HeatEngine(e.Engine): def get_type_and_version(self): return heat_common.HEAT_ENGINE_VERSION def create_cluster(self, cluster): self._update_rollback_strategy(cluster, shutdown=True) target_count = self._get_ng_counts(cluster) self._nullify_ng_counts(cluster) cluster = self._generate_heat_stack_name(cluster) self._launch_instances(cluster, target_count, CREATE_STAGES) self._update_rollback_strategy(cluster) @staticmethod def _generate_heat_stack_name(cluster): cluster = conductor.cluster_get(context.ctx(), cluster) hsn = cluster.name + cluster.id[:8] extra = cluster.extra.to_dict() if cluster.extra else {} extra['heat_stack_name'] = hsn conductor.cluster_update(context.ctx(), cluster, {'extra': extra}) return conductor.cluster_get(context.ctx(), cluster) def _get_ng_counts(self, cluster): count = {} for node_group in cluster.node_groups: count[node_group.id] = node_group.count return count def _nullify_ng_counts(self, cluster): ctx = context.ctx() for node_group in cluster.node_groups: conductor.node_group_update(ctx, node_group, {"count": 0}) def scale_cluster(self, cluster, target_count, instances_to_delete=None): ctx = context.ctx() rollback_count = self._get_ng_counts(cluster) self._update_rollback_strategy(cluster, rollback_count=rollback_count, target_count=target_count) inst_ids = self._launch_instances( cluster, target_count, SCALE_STAGES, update_stack=True, disable_rollback=False, instances_to_delete=instances_to_delete) cluster = conductor.cluster_get(ctx, cluster) c_u.clean_cluster_from_empty_ng(cluster) self._update_rollback_strategy(cluster) return inst_ids def rollback_cluster(self, cluster, reason): rollback_info = cluster.rollback_info or {} self._update_rollback_strategy(cluster) if rollback_info.get('shutdown', False): self._rollback_cluster_creation(cluster, reason) LOG.warning("Cluster creation rollback " "(reason: {reason})".format(reason=reason)) return False rollback_count = rollback_info.get('rollback_count', {}).copy() target_count = rollback_info.get('target_count', {}).copy() if rollback_count or target_count: self._rollback_cluster_scaling( cluster, rollback_count, target_count, reason) LOG.warning("Cluster scaling rollback " "(reason: {reason})".format(reason=reason)) return True return False def _update_rollback_strategy(self, cluster, shutdown=False, rollback_count=None, target_count=None): rollback_info = {} if 
shutdown: rollback_info['shutdown'] = shutdown if rollback_count: rollback_info['rollback_count'] = rollback_count if target_count: rollback_info['target_count'] = target_count cluster = conductor.cluster_update( context.ctx(), cluster, {'rollback_info': rollback_info}) return cluster def _populate_cluster(self, cluster, stack): ctx = context.ctx() old_ids = [i.instance_id for i in c_u.get_instances(cluster)] new_ids = [] for node_group in cluster.node_groups: instances = stack.get_node_group_instances(node_group) for instance in instances: nova_id = instance['physical_id'] if nova_id not in old_ids: name = instance['name'] inst = { "instance_id": nova_id, "instance_name": name } if cluster.use_designate_feature(): inst.update( {"dns_hostname": name + '.' + cluster.domain_name[:-1]}) instance_id = conductor.instance_add(ctx, node_group, inst) new_ids.append(instance_id) return new_ids def _rollback_cluster_creation(self, cluster, ex): """Shutdown all instances and update cluster status.""" self.shutdown_cluster(cluster) def _rollback_cluster_scaling(self, cluster, rollback_count, target_count, ex): """Attempt to rollback cluster scaling. Our rollback policy for scaling is as follows: We shut down nodes created during scaling, but we don't try to to get back decommissioned nodes. I.e. during the rollback we only shut down nodes and not launch them. That approach should maximize the chance of rollback success. """ for ng in rollback_count: if rollback_count[ng] > target_count[ng]: rollback_count[ng] = target_count[ng] self._launch_instances(cluster, rollback_count, ROLLBACK_STAGES, update_stack=True) def shutdown_cluster(self, cluster, force=False): """Shutdown specified cluster and all related resources.""" if force: heat_shutdown = heat.lazy_delete_stack else: heat_shutdown = heat.delete_stack try: heat_shutdown(cluster) except heat_exc.HTTPNotFound: LOG.warning('Did not find stack for cluster.') except ex.HeatStackException: raise self._clean_job_executions(cluster) self._remove_db_objects(cluster) @cpo.event_wrapper( True, step=_('Create Heat stack'), param=('cluster', 1)) def _create_instances(self, cluster, target_count, update_stack=False, disable_rollback=True, instances_to_delete=None): stack = ht.ClusterStack(cluster) self._update_instance_count(stack, cluster, target_count, instances_to_delete) stack.instantiate(update_existing=update_stack, disable_rollback=disable_rollback) heat.wait_stack_completion( cluster, is_update=update_stack, last_updated_time=stack.last_updated_time) return self._populate_cluster(cluster, stack) def _launch_instances(self, cluster, target_count, stages, update_stack=False, disable_rollback=True, instances_to_delete=None): # create all instances cluster = c_u.change_cluster_status(cluster, stages[0]) inst_ids = self._create_instances( cluster, target_count, update_stack, disable_rollback, instances_to_delete) # wait for all instances are up and networks ready cluster = c_u.change_cluster_status(cluster, stages[1]) instances = c_u.get_instances(cluster, inst_ids) self._await_networks(cluster, instances) # prepare all instances cluster = c_u.change_cluster_status(cluster, stages[2]) instances = c_u.get_instances(cluster, inst_ids) volumes.mount_to_instances(instances) self._configure_instances(cluster) return inst_ids def _update_instance_count(self, stack, cluster, target_count, instances_to_delete=None): ctx = context.ctx() instances_name_to_delete = {} if instances_to_delete: for instance in instances_to_delete: node_group_id = 
instance['node_group']['id'] if node_group_id not in instances_name_to_delete: instances_name_to_delete[node_group_id] = [] instances_name_to_delete[node_group_id].append( instance['instance_name']) for node_group in cluster.node_groups: count = target_count[node_group.id] stack.add_node_group_extra( node_group.id, count, self._generate_user_data_script, instances_name_to_delete.get(node_group.id, None)) for inst in node_group.instances: if (instances_to_delete and node_group.id in instances_name_to_delete): if (inst.instance_name in instances_name_to_delete[node_group.id]): conductor.instance_remove(ctx, inst) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/heat/templates.py0000664000175000017500000006004600000000000021256 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils as json import six import yaml from sahara.plugins import provisioning as plugin_provisioning from sahara.service.heat import commons as heat_common from sahara.utils import cluster as cl from sahara.utils import general as g from sahara.utils.openstack import base as b from sahara.utils.openstack import heat as h from sahara.utils.openstack import neutron from sahara.utils.openstack import nova CONF = cfg.CONF LOG = logging.getLogger(__name__) SSH_PORT = 22 INSTANCE_RESOURCE_NAME = "inst" SERVER_GROUP_NAMES = "servgroups" AUTO_SECURITY_GROUP_PARAM_NAME = "autosecgroup" INTERNAL_DESIGNATE_REC = "internal_designate_record" INTERNAL_DESIGNATE_REV_REC = "internal_designate_reverse_record" EXTERNAL_DESIGNATE_REC = "external_designate_record" EXTERNAL_DESIGNATE_REV_REC = "external_designate_reverse_record" # TODO(vgridnev): Using insecure flag until correct way to pass certificate # will be invented WAIT_CONDITION_SCRIPT_TEMPLATE = ''' while true; do wc_notify --insecure --data-binary '{"status": "SUCCESS"}' if [ $? 
-eq 0 ]; then break fi sleep 10 done ''' heat_engine_opts = [ cfg.BoolOpt( 'heat_enable_wait_condition', default=True, help="Enable wait condition feature to reduce polling during cluster " "creation") ] CONF.register_opts(heat_engine_opts) def _get_inst_name(ng): return { "list_join": [ '-', [ng.cluster.name.lower(), ng.name.lower(), {"get_param": "instance_index"}] ] } def _get_inst_domain_name(domain): return { "list_join": [ '.', [{"get_attr": [INSTANCE_RESOURCE_NAME, "name"]}, domain]] } def _get_aa_group_name(cluster, server_group_index): return g.generate_aa_group_name(cluster.name, server_group_index) def _get_port_name(ng): return { "list_join": [ '-', [ng.cluster.name.lower(), ng.name.lower(), {"get_param": "instance_index"}, "port"] ] } def _get_floating_name(ng): return { "list_join": [ '-', [ng.cluster.name.lower(), ng.name.lower(), {"get_param": "instance_index"}, "floating"] ] } def _get_floating_assoc_name(ng): return { "list_join": [ '-', [ng.cluster.name.lower(), ng.name.lower(), {"get_param": "instance_index"}, "floating", "assoc"] ] } def _get_volume_name(ng): return { "list_join": [ '-', [ng.cluster.name.lower(), ng.name.lower(), {"get_param": "instance_index"}, "volume", {"get_param": "volume_index"}] ] } def _get_wc_handle_name(inst_name): return '%s-wc-handle' % inst_name def _get_wc_waiter_name(inst_name): return '%s-wc-waiter' % inst_name def _get_index_from_inst_name(inst_name): return inst_name.split('-')[-1] class ClusterStack(object): def __init__(self, cluster): self.cluster = cluster self.node_groups_extra = {} self.files = {} self.last_updated_time = None self.base_info = ( "Data Processing Cluster by Sahara\n" "Sahara cluster name: {cluster}\n" "Sahara engine: {version}".format( cluster=cluster.name, version=heat_common.HEAT_ENGINE_VERSION) ) self._current_sg_index = 1 def _node_group_description(self, ng): return "{info}\nNode group {node_group}".format( info=self.base_info, node_group=ng.name) def _asg_for_node_group_description(self, ng): return ("{info}\n" "Auto security group for Sahara Node Group: " "{node_group}".format(info=self.base_info, node_group=ng.name)) def _volume_for_node_group_description(self, ng): return ("{info}\n" "Volume for Sahara Node Group {node_group}".format( node_group=ng.name, info=self.base_info)) def add_node_group_extra(self, node_group_id, node_count, gen_userdata_func, instances_to_delete=None): self.node_groups_extra[node_group_id] = { 'node_count': node_count, 'gen_userdata_func': gen_userdata_func, 'instances_to_delete': instances_to_delete } def _get_main_template(self, instances_to_delete=None): outputs = {} resources = self._serialize_resources(outputs, instances_to_delete) return yaml.safe_dump({ "heat_template_version": heat_common.HEAT_TEMPLATE_VERSION, "description": self.base_info, "resources": resources, "outputs": outputs }) def instantiate(self, update_existing, disable_rollback=True, instances_to_delete=None): main_tmpl = self._get_main_template(instances_to_delete) kwargs = { 'stack_name': self.cluster.stack_name, 'timeout_mins': 180, 'disable_rollback': disable_rollback, 'parameters': {}, 'template': main_tmpl, 'files': self.files } if CONF.heat_stack_tags: kwargs['tags'] = ",".join(CONF.heat_stack_tags) log_kwargs = copy.deepcopy(kwargs) log_kwargs['template'] = yaml.safe_load(log_kwargs['template']) for filename in log_kwargs['files'].keys(): log_kwargs['files'][filename] = yaml.safe_load( log_kwargs['files'][filename]) log_kwargs = json.dumps(log_kwargs) if not update_existing: LOG.debug("Creating Heat 
stack with args: \n{args}" .format(args=log_kwargs)) b.execute_with_retries(h.client().stacks.create, **kwargs) else: stack = h.get_stack(self.cluster.stack_name) self.last_updated_time = stack.updated_time LOG.debug("Updating Heat stack {stack} with args: \n" "{args}".format(stack=stack, args=log_kwargs)) b.execute_with_retries(stack.update, **kwargs) def _get_server_group_name(self): index = self._current_sg_index # computing server group index in round robin fashion if index < self.cluster.anti_affinity_ratio: self._current_sg_index = (index + 1) else: self._current_sg_index = 1 return _get_aa_group_name(self.cluster, self._current_sg_index) def _need_aa_server_group(self, node_group): for node_process in node_group.node_processes: if node_process in self.cluster.anti_affinity: return True return False def _get_anti_affinity_scheduler_hints(self, node_group): if not self._need_aa_server_group(node_group): return {} return { "scheduler_hints": { "group": { "get_param": [SERVER_GROUP_NAMES, {"get_param": "instance_index"}] } } } def _serialize_resources(self, outputs, instances_to_delete=None): resources = {} if self.cluster.anti_affinity: # Creating server groups equal to the anti_affinity_ratio for i in range(0, self.cluster.anti_affinity_ratio): resources.update(self._serialize_aa_server_group(i + 1)) for ng in self.cluster.node_groups: resources.update(self._serialize_ng_group(ng, outputs, instances_to_delete)) for ng in self.cluster.node_groups: resources.update(self._serialize_auto_security_group(ng)) return resources def _serialize_ng_group(self, ng, outputs, instances_to_delete=None): ng_file_name = "file://" + ng.name + ".yaml" self.files[ng_file_name] = self._serialize_ng_file(ng) outputs[ng.name + "-instances"] = { "value": {"get_attr": [ng.name, "instance"]}} properties = {"instance_index": "%index%"} if ng.cluster.anti_affinity: ng_count = self.node_groups_extra[ng.id]['node_count'] # assuming instance_index also start from index 0 for i in range(0, ng_count): server_group_name = self._get_server_group_name() server_group_resource = { "get_resource": server_group_name } if SERVER_GROUP_NAMES not in properties: properties[SERVER_GROUP_NAMES] = [] properties[SERVER_GROUP_NAMES].insert(i, server_group_resource) if ng.auto_security_group: properties[AUTO_SECURITY_GROUP_PARAM_NAME] = { 'get_resource': g.generate_auto_security_group_name(ng)} removal_policies = [] if self.node_groups_extra[ng.id]['instances_to_delete']: resource_list = [] for name in self.node_groups_extra[ng.id]['instances_to_delete']: resource_list.append(_get_index_from_inst_name(name)) removal_policies.append({'resource_list': resource_list}) return { ng.name: { "type": "OS::Heat::ResourceGroup", "properties": { "count": self.node_groups_extra[ng.id]['node_count'], "removal_policies": removal_policies, "resource_def": { "type": ng_file_name, "properties": properties } } } } def _serialize_ng_file(self, ng): parameters = {"instance_index": {"type": "string"}} if ng.cluster.anti_affinity: parameters[SERVER_GROUP_NAMES] = {"type": "comma_delimited_list", "default": []} if ng.auto_security_group: parameters[AUTO_SECURITY_GROUP_PARAM_NAME] = {'type': "string"} return yaml.safe_dump({ "heat_template_version": heat_common.HEAT_TEMPLATE_VERSION, "description": self._node_group_description(ng), "parameters": parameters, "resources": self._serialize_instance(ng), "outputs": { "instance": {"value": { "physical_id": {"get_resource": INSTANCE_RESOURCE_NAME}, "name": {"get_attr": [INSTANCE_RESOURCE_NAME, "name"]} }}} }) def 
_serialize_auto_security_group(self, ng): if not ng.auto_security_group: return {} security_group_name = g.generate_auto_security_group_name(ng) security_group_description = self._asg_for_node_group_description(ng) res_type = "OS::Neutron::SecurityGroup" desc_key = "description" rules_key = "rules" create_rule = lambda ip_version, cidr, proto, from_port, to_port: { "ethertype": "IPv{}".format(ip_version), "remote_ip_prefix": cidr, "protocol": proto, "port_range_min": six.text_type(from_port), "port_range_max": six.text_type(to_port)} rules = self._serialize_auto_security_group_rules(ng, create_rule) return { security_group_name: { "type": res_type, "properties": { desc_key: security_group_description, rules_key: rules } } } def _serialize_auto_security_group_rules(self, ng, create_rule): rules = [] for port in ng.open_ports: rules.append(create_rule(4, '0.0.0.0/0', 'tcp', port, port)) rules.append(create_rule(6, '::/0', 'tcp', port, port)) rules.append(create_rule(4, '0.0.0.0/0', 'tcp', SSH_PORT, SSH_PORT)) rules.append(create_rule(6, '::/0', 'tcp', SSH_PORT, SSH_PORT)) # open all traffic for private networks for cidr in neutron.get_private_network_cidrs(ng.cluster): ip_ver = 6 if ':' in cidr else 4 for protocol in ['tcp', 'udp']: rules.append(create_rule(ip_ver, cidr, protocol, 1, 65535)) rules.append(create_rule(ip_ver, cidr, 'icmp', 0, 255)) return rules @staticmethod def _get_wait_condition_timeout(ng): configs = ng.cluster.cluster_configs timeout_cfg = plugin_provisioning.HEAT_WAIT_CONDITION_TIMEOUT cfg_target = timeout_cfg.applicable_target cfg_name = timeout_cfg.name return int(configs.get(cfg_target, {}).get(cfg_name, timeout_cfg.default_value)) def _serialize_designate_records(self): if not self.cluster.use_designate_feature(): return {} hostname = _get_inst_domain_name(self.cluster.domain_name) return { INTERNAL_DESIGNATE_REC: { 'type': 'OS::Designate::Record', 'properties': { 'name': hostname, 'type': 'A', 'data': {'get_attr': [ INSTANCE_RESOURCE_NAME, 'networks', 'private', 0]}, 'domain': self.cluster.domain_name } }, EXTERNAL_DESIGNATE_REC: { 'type': 'OS::Designate::Record', 'properties': { 'name': hostname, 'type': 'A', 'data': {'get_attr': ['floating_ip', 'ip']}, 'domain': self.cluster.domain_name } } } def _serialize_designate_reverse_records(self): if not self.cluster.use_designate_feature(): return {} def _generate_reversed_ip(ip): return { 'list_join': [ '.', [ {'str_split': ['.', ip, 3]}, {'str_split': ['.', ip, 2]}, {'str_split': ['.', ip, 1]}, {'str_split': ['.', ip, 0]}, 'in-addr.arpa.' ] ] } hostname = _get_inst_domain_name(self.cluster.domain_name) return { INTERNAL_DESIGNATE_REV_REC: { 'type': 'OS::Designate::Record', 'properties': { 'name': _generate_reversed_ip({'get_attr': [ INSTANCE_RESOURCE_NAME, 'networks', 'private', 0]}), 'type': 'PTR', 'data': hostname, 'domain': 'in-addr.arpa.' } }, EXTERNAL_DESIGNATE_REV_REC: { 'type': 'OS::Designate::Record', 'properties': { 'name': _generate_reversed_ip( {'get_attr': ['floating_ip', 'ip']}), 'type': 'PTR', 'data': hostname, 'domain': 'in-addr.arpa.' } } } def _serialize_instance(self, ng): resources = {} properties = {} inst_name = _get_inst_name(ng) private_net = self.cluster.neutron_management_network sec_groups = self._get_security_groups(ng) # Check if cluster contains user key-pair and include it to template. 
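# Note: "key_name" below is only set when the cluster was created with a
# user-supplied key pair; the cluster's own management public key is injected
# separately, through the userdata script produced by
# engine.Engine._generate_user_data_script and embedded further down in this
# method.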
if self.cluster.user_keypair_id: properties["key_name"] = self.cluster.user_keypair_id port_name = _get_port_name(ng) resources.update(self._serialize_port( port_name, private_net, sec_groups)) properties["networks"] = [{"port": {"get_resource": "port"}}] if ng.floating_ip_pool: resources.update(self._serialize_neutron_floating(ng)) gen_userdata_func = self.node_groups_extra[ng.id]['gen_userdata_func'] key_script = gen_userdata_func(ng, inst_name) if CONF.heat_enable_wait_condition: etc_hosts = cl.etc_hosts_entry_for_service('orchestration') if etc_hosts: etc_hosts = "echo '%s' | sudo tee -a /etc/hosts" % etc_hosts tml = [key_script, WAIT_CONDITION_SCRIPT_TEMPLATE] if etc_hosts: tml = [key_script, etc_hosts, WAIT_CONDITION_SCRIPT_TEMPLATE] userdata = { "str_replace": { "template": "\n".join(tml), "params": { "wc_notify": { "get_attr": [ _get_wc_handle_name(ng.name), "curl_cli" ] } } } } else: userdata = key_script if ng.availability_zone: properties["availability_zone"] = ng.availability_zone properties.update(self._get_anti_affinity_scheduler_hints(ng)) properties.update({ "name": inst_name, "flavor": six.text_type(ng.flavor_id), "admin_user": ng.image_username, "user_data": userdata }) if ng.boot_from_volume: resources.update(self._get_bootable_volume(ng)) properties["block_device_mapping"] = [ {"device_name": "vda", "volume_id": {"get_resource": "bootable_volume"}, "delete_on_termination": "true"}] else: properties.update({"image": ng.get_image_id()}) resources.update({ INSTANCE_RESOURCE_NAME: { "type": "OS::Nova::Server", "properties": properties } }) resources.update(self._serialize_designate_records()) resources.update(self._serialize_designate_reverse_records()) resources.update(self._serialize_volume(ng)) resources.update(self._serialize_wait_condition(ng)) return resources def _get_bootable_volume(self, node_group): node_group_flavor = nova.get_flavor(id=node_group.flavor_id) image_size = node_group_flavor.disk properties = {} properties["size"] = image_size properties["image"] = node_group.get_image_id() if node_group.boot_volume_type: properties["volume_type"] = node_group.boot_volume_type if node_group.boot_volume_availability_zone: properties["availability_zone"] = ( node_group.boot_volume_availability_zone ) if node_group.boot_volume_local_to_instance: properties["scheduler_hints"] = { "local_to_instance": {"get_param": "instance"}} return { "bootable_volume": { "type": "OS::Cinder::Volume", "properties": properties } } def _serialize_wait_condition(self, ng): if not CONF.heat_enable_wait_condition: return {} return { _get_wc_handle_name(ng.name): { "type": "OS::Heat::WaitConditionHandle" }, _get_wc_waiter_name(ng.name): { "type": "OS::Heat::WaitCondition", "depends_on": INSTANCE_RESOURCE_NAME, "properties": { "timeout": self._get_wait_condition_timeout(ng), "handle": {"get_resource": _get_wc_handle_name(ng.name)} } } } def _serialize_neutron_floating(self, ng): return { "floating_ip": { "type": "OS::Neutron::FloatingIP", "properties": { "floating_network_id": ng.floating_ip_pool, "port_id": {"get_resource": "port"} } } } def _serialize_port(self, port_name, fixed_net_id, security_groups): properties = { "network_id": fixed_net_id, "replacement_policy": "AUTO", "name": port_name } if security_groups: properties["security_groups"] = security_groups return { "port": { "type": "OS::Neutron::Port", "properties": properties, } } def _serialize_volume(self, ng): if not ng.volumes_size or not ng.volumes_per_node: return {} volume_file_name = "file://" + ng.name + "-volume.yaml" 
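# Each node group that requests volumes gets its own nested template,
# registered in self.files below: an OS::Heat::ResourceGroup whose count is
# volumes_per_node, where every member defines an OS::Cinder::Volume plus an
# OS::Cinder::VolumeAttachment binding it to the instance resource.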
self.files[volume_file_name] = self._serialize_volume_file(ng) return { ng.name: { "type": "OS::Heat::ResourceGroup", "properties": { "count": ng.volumes_per_node, "resource_def": { "type": volume_file_name, "properties": { "volume_index": "%index%", "instance_index": {"get_param": "instance_index"}, "instance": {"get_resource": INSTANCE_RESOURCE_NAME}} } } } } def _serialize_volume_file(self, ng): volume_name = _get_volume_name(ng) properties = { "name": volume_name, "size": six.text_type(ng.volumes_size) } if ng.volume_type: properties["volume_type"] = ng.volume_type if ng.volumes_availability_zone: properties["availability_zone"] = ng.volumes_availability_zone if ng.volume_local_to_instance: properties["scheduler_hints"] = { "local_to_instance": {"get_param": "instance"}} return yaml.safe_dump({ "heat_template_version": heat_common.HEAT_TEMPLATE_VERSION, "description": self._volume_for_node_group_description(ng), "parameters": { "volume_index": { "type": "string" }, "instance_index": { "type": "string" }, "instance": { "type": "string" }}, "resources": { "volume": { "type": "OS::Cinder::Volume", "properties": properties }, "volume-attachment": { "type": "OS::Cinder::VolumeAttachment", "properties": { "instance_uuid": {"get_param": "instance"}, "volume_id": {"get_resource": "volume"}, } }}, "outputs": {} }) def _get_security_groups(self, node_group): node_group_sg = list(node_group.security_groups or []) if node_group.auto_security_group: node_group_sg += [ {"get_param": AUTO_SECURITY_GROUP_PARAM_NAME} ] return node_group_sg def _serialize_aa_server_group(self, server_group_index): server_group_name = _get_aa_group_name(self.cluster, server_group_index) return { server_group_name: { "type": "OS::Nova::ServerGroup", "properties": { "name": server_group_name, "policies": ["anti-affinity"] } } } def get_node_group_instances(self, node_group): cluster = node_group.cluster outputs = h.get_stack_outputs(cluster) for output in outputs: if output['output_key'] == node_group.name + "-instances": return output["output_value"] return [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/networks.py0000664000175000017500000000436300000000000020213 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg import six from sahara import conductor as c from sahara import context from sahara.utils.openstack import nova conductor = c.API CONF = cfg.CONF def init_instances_ips(instance): """Extracts internal and management ips. As internal ip will be used the first ip from the nova networks CIDRs. If use_floating_ip flag is set than management ip will be the first non-internal ip. 
""" server = nova.get_instance_info(instance) management_ip = None internal_ip = None for addresses in six.itervalues(server.addresses): # selects IPv4 preferentially for address in sorted(addresses, key=lambda addr: addr['version']): if address['OS-EXT-IPS:type'] == 'fixed': internal_ip = internal_ip or address['addr'] else: management_ip = management_ip or address['addr'] # tmckay-fp okay # conf.use_floating_ips becomes # "use a floating ip for the management ip if one is defined" # assignment comes from nova conf setting, or from floating_ip_pool value # tmckay-fp log an extra warning here in the neutron # case that the node group has a floating ip pool but # we don't have a management ip yet ... cluster = instance.cluster if (not CONF.use_floating_ips or not management_ip or (cluster.has_proxy_gateway() and not instance.node_group.is_proxy_gateway)): management_ip = internal_ip conductor.instance_update(context.ctx(), instance, {"management_ip": management_ip, "internal_ip": internal_ip}) return internal_ip and management_ip ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/ntp_service.py0000664000175000017500000000721000000000000020652 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg from oslo_log import log as logging from sahara import conductor as cond from sahara import context from sahara.plugins import provisioning as common_configs from sahara.utils import cluster as c_u CONF = cfg.CONF LOG = logging.getLogger(__name__) conductor = cond.API ntp_opts = [ cfg.StrOpt('default_ntp_server', default="pool.ntp.org", help="Default ntp server for time sync") ] CONF.register_opts(ntp_opts) def _sudo(remote, cmd): remote.execute_command(cmd, run_as_root=True) def _restart_ntp(remote): distrib = remote.get_os_distrib() cmd = "service %s restart" if distrib == 'ubuntu': cmd = cmd % "ntp" else: cmd = cmd % "ntpd" _sudo(remote, cmd) def _verify_installation(remote): distrib = remote.get_os_distrib() if distrib == 'ubuntu': return remote.execute_command("dpkg -s ntp") else: return remote.execute_command("rpm -q ntp") def _check_ntp_installed(remote): try: exit_code, stdout = _verify_installation(remote) if exit_code != 0: return False return True except Exception: return False def _configure_ntp_on_instance(instance, url): with context.set_current_instance_id(instance.instance_id): LOG.debug("Configuring ntp server") with instance.remote() as r: if not _check_ntp_installed(r): # missing ntp service LOG.warning("Unable to configure NTP service") return r.prepend_to_file( "/etc/ntp.conf", "server {url} iburst\n".format(url=url), run_as_root=True) _restart_ntp(r) try: _sudo(r, "ntpdate -u {url}".format(url=url)) except Exception as e: LOG.debug("Update time on VM failed with error: %s", e) LOG.info("NTP successfully configured") def is_ntp_enabled(cluster): target = common_configs.NTP_ENABLED.applicable_target name = common_configs.NTP_ENABLED.name cl_configs = cluster.cluster_configs if target not in cl_configs or name not in cl_configs[target]: return common_configs.NTP_ENABLED.default_value return cl_configs[target][name] def retrieve_ntp_server_url(cluster): target = common_configs.NTP_URL.applicable_target name = common_configs.NTP_URL.name cl_configs = cluster.cluster_configs if target not in cl_configs or name not in cl_configs[target]: return CONF.default_ntp_server return cl_configs[target][name] def configure_ntp(cluster_id, instance_ids=None): cluster = conductor.cluster_get(context.ctx(), cluster_id) if not is_ntp_enabled(cluster): LOG.debug("Don't configure NTP on cluster") return instances = c_u.get_instances(cluster, instance_ids) url = retrieve_ntp_server_url(cluster) with context.ThreadGroup() as tg: for instance in instances: tg.spawn("configure-ntp-%s" % instance.instance_name, _configure_ntp_on_instance, instance, url) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/ops.py0000664000175000017500000004411500000000000017137 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
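# --- Illustrative sketch, not part of the original module ---
# The per-instance steps performed by _configure_ntp_on_instance in
# ntp_service.py above, gathered into plain strings for readability. The
# helper name and the dict layout are assumptions for illustration; the real
# module executes these through the instance's remote() wrapper with root
# privileges.
def _example_ntp_steps(distro, url):
    service = "ntp" if distro == "ubuntu" else "ntpd"
    return {
        # line prepended to /etc/ntp.conf
        "conf_line": "server {url} iburst".format(url=url),
        # service restart command (ntp on Ubuntu, ntpd elsewhere)
        "restart": "service {svc} restart".format(svc=service),
        # one-off time sync
        "sync": "ntpdate -u {url}".format(url=url),
    }

# _example_ntp_steps("centos", "pool.ntp.org")
# -> {'conf_line': 'server pool.ntp.org iburst',
#     'restart': 'service ntpd restart',
#     'sync': 'ntpdate -u pool.ntp.org'}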
import functools from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_utils import uuidutils import six from sahara import conductor as c from sahara import context from sahara import exceptions from sahara.i18n import _ from sahara.plugins import base as plugin_base from sahara.plugins import utils as u from sahara.service.edp import job_manager from sahara.service.edp.utils import shares from sahara.service.health import verification_base as ver_base from sahara.service import ntp_service from sahara.service import trusts from sahara.utils import cluster as c_u from sahara.utils.openstack import nova from sahara.utils import remote from sahara.utils import rpc as rpc_utils conductor = c.API CONF = cfg.CONF LOG = logging.getLogger(__name__) INFRA = None def setup_ops(engine): global INFRA INFRA = engine class LocalOps(object): def provision_cluster(self, cluster_id): context.spawn("cluster-creating-%s" % cluster_id, _provision_cluster, cluster_id) def provision_scaled_cluster(self, cluster_id, node_group_id_map, node_group_instance_map=None): context.spawn("cluster-scaling-%s" % cluster_id, _provision_scaled_cluster, cluster_id, node_group_id_map, node_group_instance_map) def terminate_cluster(self, cluster_id, force=False): context.spawn("cluster-terminating-%s" % cluster_id, terminate_cluster, cluster_id, force) def run_edp_job(self, job_execution_id): context.spawn("Starting Job Execution %s" % job_execution_id, _run_edp_job, job_execution_id) def cancel_job_execution(self, job_execution_id): context.spawn("Canceling Job Execution %s" % job_execution_id, _cancel_job_execution, job_execution_id) def delete_job_execution(self, job_execution_id): context.spawn("Deleting Job Execution %s" % job_execution_id, _delete_job_execution, job_execution_id) def handle_verification(self, cluster_id, values): context.spawn('Handling Verification for cluster %s' % cluster_id, _handle_verification, cluster_id, values) def get_engine_type_and_version(self): return INFRA.get_type_and_version() def job_execution_suspend(self, job_execution_id): context.spawn("Suspend Job Execution %s" % job_execution_id, _suspend_job_execution, job_execution_id) class RemoteOps(rpc_utils.RPCClient): def __init__(self): target = messaging.Target(topic='sahara-ops', version='1.0') super(RemoteOps, self).__init__(target) def provision_cluster(self, cluster_id): self.cast('provision_cluster', cluster_id=cluster_id) def update_keypair(self, cluster_id): self.cast('update_keypair', cluster_id=cluster_id) def provision_scaled_cluster(self, cluster_id, node_group_id_map, node_group_instance_map=None): self.cast('provision_scaled_cluster', cluster_id=cluster_id, node_group_id_map=node_group_id_map, node_group_instance_map=node_group_instance_map) def terminate_cluster(self, cluster_id, force=False): self.cast('terminate_cluster', cluster_id=cluster_id, force=force) def run_edp_job(self, job_execution_id): self.cast('run_edp_job', job_execution_id=job_execution_id) def cancel_job_execution(self, job_execution_id): self.cast('cancel_job_execution', job_execution_id=job_execution_id) def delete_job_execution(self, job_execution_id): self.cast('delete_job_execution', job_execution_id=job_execution_id) def handle_verification(self, cluster_id, values): self.cast('handle_verification', cluster_id=cluster_id, values=values) def get_engine_type_and_version(self): return self.call('get_engine_type_and_version') def job_execution_suspend(self, job_execution_id): 
self.cast('job_execution_suspend', job_execution_id=job_execution_id) def request_context(func): @functools.wraps(func) def wrapped(self, ctx, *args, **kwargs): context.set_ctx(context.Context(**ctx)) return func(self, *args, **kwargs) return wrapped class OpsServer(rpc_utils.RPCServer): def __init__(self): target = messaging.Target(topic='sahara-ops', server=uuidutils.generate_uuid(), version='1.0') super(OpsServer, self).__init__(target) @request_context def provision_cluster(self, cluster_id): _provision_cluster(cluster_id) @request_context def update_keypair(self, cluster_id): _update_keypair(cluster_id) @request_context def provision_scaled_cluster(self, cluster_id, node_group_id_map, node_group_instance_map=None): _provision_scaled_cluster(cluster_id, node_group_id_map, node_group_instance_map) @request_context def terminate_cluster(self, cluster_id, force=False): terminate_cluster(cluster_id, force) @request_context def run_edp_job(self, job_execution_id): _run_edp_job(job_execution_id) @request_context def cancel_job_execution(self, job_execution_id): _cancel_job_execution(job_execution_id) @request_context def delete_job_execution(self, job_execution_id): _delete_job_execution(job_execution_id) @request_context def handle_verification(self, cluster_id, values): _handle_verification(cluster_id, values) @request_context def get_engine_type_and_version(self): return INFRA.get_type_and_version() @request_context def job_execution_suspend(self, job_execution_id): _suspend_job_execution(job_execution_id) def _setup_trust_for_cluster(cluster): cluster = conductor.cluster_get(context.ctx(), cluster) trusts.create_trust_for_cluster(cluster) trusts.use_os_admin_auth_token(cluster) def ops_error_handler(description): def decorator(f): @functools.wraps(f) def wrapper(cluster_id, *args, **kwds): ctx = context.ctx() try: # Clearing status description before executing c_u.change_cluster_status_description(cluster_id, "") f(cluster_id, *args, **kwds) except Exception as ex: # something happened during cluster operation cluster = conductor.cluster_get(ctx, cluster_id) # check if cluster still exists (it might have been removed) if (cluster is None or cluster.status == c_u.CLUSTER_STATUS_DELETING): LOG.debug("Cluster was deleted or marked for deletion. " "Canceling current operation.") return msg = six.text_type(ex) LOG.exception("Error during operating on cluster (reason: " "{reason})".format(reason=msg)) try: # trying to rollback desc = description.format(reason=msg) if _rollback_cluster(cluster, ex): c_u.change_cluster_status( cluster, c_u.CLUSTER_STATUS_ACTIVE, desc) else: c_u.change_cluster_status( cluster, c_u.CLUSTER_STATUS_ERROR, desc) except Exception as rex: cluster = conductor.cluster_get(ctx, cluster_id) # check if cluster still exists (it might have been # removed during rollback) if (cluster is None or cluster.status == c_u.CLUSTER_STATUS_DELETING): LOG.debug("Cluster was deleted or marked for deletion." 
" Canceling current operation.") return LOG.exception( "Error during rollback of cluster (reason:" " {reason})".format(reason=six.text_type(rex))) desc = "{0}, {1}".format(msg, six.text_type(rex)) c_u.change_cluster_status( cluster, c_u.CLUSTER_STATUS_ERROR, description.format(reason=desc)) return wrapper return decorator def _rollback_cluster(cluster, reason): _setup_trust_for_cluster(cluster) context.set_step_type(_("Engine: rollback cluster")) return INFRA.rollback_cluster(cluster, reason) def _prepare_provisioning(cluster_id): ctx = context.ctx() cluster = conductor.cluster_get(ctx, cluster_id) plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name) for nodegroup in cluster.node_groups: update_dict = {} update_dict["image_username"] = INFRA.get_node_group_image_username( nodegroup) conductor.node_group_update(ctx, nodegroup, update_dict) _setup_trust_for_cluster(cluster) cluster = conductor.cluster_get(ctx, cluster_id) return ctx, cluster, plugin def _update_sahara_info(ctx, cluster): sahara_info = { 'infrastructure_engine': INFRA.get_type_and_version(), 'remote': remote.get_remote_type_and_version()} return conductor.cluster_update( ctx, cluster, {'sahara_info': sahara_info}) @ops_error_handler( _("Creating cluster failed for the following reason(s): {reason}")) def _provision_cluster(cluster_id): ctx, cluster, plugin = _prepare_provisioning(cluster_id) cluster = _update_sahara_info(ctx, cluster) # updating cluster infra cluster = c_u.change_cluster_status( cluster, c_u.CLUSTER_STATUS_INFRAUPDATING) plugin.update_infra(cluster) # creating instances and configuring them cluster = conductor.cluster_get(ctx, cluster_id) context.set_step_type(_("Engine: create cluster")) INFRA.create_cluster(cluster) ntp_service.configure_ntp(cluster_id) # configure cluster cluster = c_u.change_cluster_status( cluster, c_u.CLUSTER_STATUS_CONFIGURING) context.set_step_type(_("Plugin: configure cluster")) if hasattr(plugin, 'validate_images'): plugin.validate_images(cluster, test_only=False) shares.mount_shares(cluster) plugin.configure_cluster(cluster) # starting prepared and configured cluster cluster = c_u.change_cluster_status( cluster, c_u.CLUSTER_STATUS_STARTING) context.set_step_type(_("Plugin: start cluster")) plugin.start_cluster(cluster) # cluster is now up and ready cluster = c_u.change_cluster_status( cluster, c_u.CLUSTER_STATUS_ACTIVE) # schedule execution pending job for cluster for je in conductor.job_execution_get_all(ctx, cluster_id=cluster.id): job_manager.run_job(je.id) _refresh_health_for_cluster(cluster_id) def _specific_inst_to_delete(node_group, node_group_instance_map=None): if node_group_instance_map: if node_group.id in node_group_instance_map: return True return False @ops_error_handler( _("Scaling cluster failed for the following reason(s): {reason}")) def _provision_scaled_cluster(cluster_id, node_group_id_map, node_group_instance_map=None): """Provision scaled cluster. :param cluster_id: Id of cluster to be scaled. :param node_group_id_map: Dictionary in the format node_group_id: number of instances. :param node_group_instance_map: Specifies the instances to be removed in each node group. 
""" ctx, cluster, plugin = _prepare_provisioning(cluster_id) # Decommissioning surplus nodes with the plugin cluster = c_u.change_cluster_status( cluster, c_u.CLUSTER_STATUS_DECOMMISSIONING) try: instances_to_delete = [] for node_group in cluster.node_groups: ng_inst_to_delete_count = 0 # new_count is the new number of instance on the current node group new_count = node_group_id_map[node_group.id] if new_count < node_group.count: # Adding selected instances to delete to the list if _specific_inst_to_delete(node_group, node_group_instance_map): for instance_ref in node_group_instance_map[node_group.id]: instances_to_delete.append(_get_instance_obj( node_group.instances, instance_ref)) ng_inst_to_delete_count += 1 # Adding random instances to the list when the number of # specific instances does not equals the difference between the # current count and the new count of instances. while node_group.count - new_count > ng_inst_to_delete_count: instances_to_delete.append(_get_random_instance_from_ng( node_group.instances, instances_to_delete)) ng_inst_to_delete_count += 1 if instances_to_delete: context.set_step_type(_("Plugin: decommission cluster")) plugin.decommission_nodes(cluster, instances_to_delete) # Scaling infrastructure cluster = c_u.change_cluster_status( cluster, c_u.CLUSTER_STATUS_SCALING) context.set_step_type(_("Engine: scale cluster")) instance_ids = INFRA.scale_cluster(cluster, node_group_id_map, instances_to_delete) # Setting up new nodes with the plugin if instance_ids: ntp_service.configure_ntp(cluster_id, instance_ids) cluster = c_u.change_cluster_status( cluster, c_u.CLUSTER_STATUS_CONFIGURING) instances = c_u.get_instances(cluster, instance_ids) context.set_step_type(_("Plugin: scale cluster")) plugin.scale_cluster(cluster, instances) c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_ACTIVE) _refresh_health_for_cluster(cluster_id) except Exception as e: c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_ACTIVE, six.text_type(e)) def _get_instance_obj(instances, instance_ref): for instance in instances: if (instance.instance_id == instance_ref or instance.instance_name == instance_ref): return instance raise exceptions.NotFoundException(str(instance_ref), _("Instance %s not found")) def _get_random_instance_from_ng(instances, instances_to_delete): # instances list doesn't order by creating date, so we should # sort it to make sure deleted instances same as heat deleted. 
insts = sorted(instances, key=lambda x: int(x['instance_name'].split('-')[-1])) for instance in reversed(insts): if instance not in instances_to_delete: return instance @ops_error_handler( _("Terminating cluster failed for the following reason(s): {reason}")) def terminate_cluster(cluster_id, force=False): ctx = context.ctx() _setup_trust_for_cluster(cluster_id) job_manager.update_job_statuses(cluster_id=cluster_id) cluster = conductor.cluster_get(ctx, cluster_id) plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name) context.set_step_type(_("Plugin: shutdown cluster")) plugin.on_terminate_cluster(cluster) context.set_step_type(_("Engine: shutdown cluster")) INFRA.shutdown_cluster(cluster, force) trusts.delete_trust_from_cluster(cluster) conductor.cluster_destroy(ctx, cluster) def _run_edp_job(job_execution_id): job_manager.run_job(job_execution_id) def _suspend_job_execution(job_execution_id): job_manager.suspend_job(job_execution_id) def _cancel_job_execution(job_execution_id): job_manager.cancel_job(job_execution_id) def _delete_job_execution(job_execution_id): try: job_execution = job_manager.cancel_job(job_execution_id) if not job_execution: # job_execution was deleted already, nothing to do return except exceptions.CancelingFailed: LOG.error("Job execution can't be cancelled in time. " "Deleting it anyway.") conductor.job_execution_destroy(context.ctx(), job_execution_id) def _refresh_health_for_cluster(cluster_id): st_dict = {'verification': {'status': 'START'}} try: ver_base.validate_verification_start(cluster_id) ver_base.handle_verification(cluster_id, st_dict) except ver_base.CannotVerifyError: LOG.debug("Cannot verify cluster because verifications are disabled " "or cluster already is verifying") except Exception: # if occasional error occurred, there is no reason to move # cluster into error state LOG.debug("Skipping refreshing cluster health") ver_base.clean_verification_data(cluster_id) def _handle_verification(cluster_id, values): ver_base.handle_verification(cluster_id, values) def _update_keypair(cluster_id): ctx = context.ctx() cluster = conductor.cluster_get(ctx, cluster_id) keypair_name = cluster.user_keypair_id key = nova.get_keypair(keypair_name) nodes = u.get_instances(cluster) for node in nodes: with node.remote() as r: r.execute_command( "echo {keypair} >> ~/.ssh/authorized_keys". format(keypair=key.public_key)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/periodic.py0000664000175000017500000002447100000000000020137 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
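# NOTE (editorial sketch, not part of the original sahara source): the ops
# module above (_get_random_instance_from_ng) chooses which instances to drop
# on scale-down by sorting on the numeric suffix of the instance name and
# walking the list in reverse, so the newest instances go first, matching the
# order in which Heat deletes them. A minimal, dependency-free illustration of
# the same selection rule, using plain dicts in place of Sahara instance
# objects (names here are hypothetical):

def pick_newest_instances(instances, already_selected, how_many):
    """Return up to `how_many` newest instances not already selected."""
    ordered = sorted(
        instances,
        key=lambda inst: int(inst['instance_name'].rsplit('-', 1)[-1]))
    picked = []
    for inst in reversed(ordered):
        if inst not in already_selected and len(picked) < how_many:
            picked.append(inst)
    return picked

# Example:
#   pick_newest_instances([{'instance_name': 'demo-worker-001'},
#                          {'instance_name': 'demo-worker-003'},
#                          {'instance_name': 'demo-worker-002'}], [], 1)
#   -> [{'instance_name': 'demo-worker-003'}]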
import functools import random from oslo_config import cfg from oslo_log import log from oslo_service import periodic_task from oslo_service import threadgroup from oslo_utils import timeutils import six from sahara import conductor as c from sahara import context from sahara.service.api import v10 as api from sahara.service import coordinator from sahara.service.edp import job_manager from sahara.service.health import verification_base as vb from sahara.service import trusts from sahara.utils import cluster as c_u from sahara.utils import edp from sahara.utils import proxy as p LOG = log.getLogger(__name__) periodic_opts = [ cfg.BoolOpt('periodic_enable', default=True, help='Enable periodic tasks.'), cfg.IntOpt('periodic_fuzzy_delay', default=60, help='Range in seconds to randomly delay when starting the' ' periodic task scheduler to reduce stampeding.' ' (Disable by setting to 0).'), cfg.IntOpt('periodic_interval_max', default=60, help='Max interval size between periodic tasks execution in ' 'seconds.'), cfg.IntOpt('min_transient_cluster_active_time', default=30, help='Minimal "lifetime" in seconds for a transient cluster. ' 'Cluster is guaranteed to be "alive" within this time ' 'period.'), cfg.IntOpt('cleanup_time_for_incomplete_clusters', default=0, help='Maximal time (in hours) for clusters allowed to be in ' 'states other than "Active", "Deleting" or "Error". If a ' 'cluster is not in "Active", "Deleting" or "Error" state ' 'and last update of it was longer than ' '"cleanup_time_for_incomplete_clusters" hours ago then it ' 'will be deleted automatically. (0 value means that ' 'automatic clean up is disabled).'), cfg.StrOpt('periodic_coordinator_backend_url', help='The backend URL to use for distributed periodic tasks ' 'coordination.'), cfg.IntOpt('periodic_workers_number', default=1, help='Number of threads to run periodic tasks.'), ] CONF = cfg.CONF CONF.register_opts(periodic_opts) conductor = c.API def get_time_since_last_update(cluster): cluster_updated_at = timeutils.normalize_time( timeutils.parse_isotime(cluster.updated_at)) current_time = timeutils.utcnow() spacing = timeutils.delta_seconds(cluster_updated_at, current_time) return spacing def terminate_cluster(ctx, cluster, description): if CONF.use_identity_api_v3 and cluster.trust_id: trusts.use_os_admin_auth_token(cluster) context.set_current_cluster_id(cluster.id) LOG.debug('Terminating {description} cluster ' 'in "{status}" state'.format(status=cluster.status, description=description)) try: api.terminate_cluster(cluster.id) except Exception as e: LOG.warning( 'Failed to terminate {description} cluster in "{status}" ' 'state: {error}.'.format(error=six.text_type(e), status=cluster.status, description=description)) else: if (cluster.status != c_u.CLUSTER_STATUS_AWAITINGTERMINATION): conductor.cluster_update( ctx, cluster, {'status': c_u.CLUSTER_STATUS_AWAITINGTERMINATION}) def set_context(func): @functools.wraps(func) def handler(self, ctx): ctx = context.get_admin_context() context.set_ctx(ctx) func(self, ctx) context.set_ctx(None) return handler def _make_periodic_tasks(): '''Return the periodic tasks object This function creates the periodic tasks class object, it is wrapped in this manner to allow easier control of enabling and disabling tasks. 
''' zombie_task_spacing = 300 if CONF.use_domain_for_proxy_users else -1 heartbeat_interval = (CONF.coordinator_heartbeat_interval if CONF.periodic_coordinator_backend_url else -1) class SaharaPeriodicTasks(periodic_task.PeriodicTasks): hr = coordinator.HashRing( CONF.periodic_coordinator_backend_url, 'sahara-periodic-tasks') def __init__(self): super(SaharaPeriodicTasks, self).__init__(CONF) @periodic_task.periodic_task( spacing=heartbeat_interval, run_immediately=True) @set_context def heartbeat(self, ctx): self.hr.heartbeat() @periodic_task.periodic_task(spacing=45) @set_context def update_job_statuses(self, ctx): LOG.debug('Updating job statuses') all_je = conductor.job_execution_get_all(ctx, end_time=None) je_to_manage = self.hr.get_subset(all_je) for job in je_to_manage: job_manager.update_job_status(job.id) @periodic_task.periodic_task(spacing=90) @set_context def terminate_unneeded_transient_clusters(self, ctx): LOG.debug('Terminating unneeded transient clusters') all_clusters = conductor.cluster_get_all( ctx, status=c_u.CLUSTER_STATUS_ACTIVE, is_transient=True) clusters_to_manage = self.hr.get_subset(all_clusters) for cluster in clusters_to_manage: jc = conductor.job_execution_count(ctx, end_time=None, cluster_id=cluster.id) if jc > 0: continue spacing = get_time_since_last_update(cluster) if spacing < CONF.min_transient_cluster_active_time: continue terminate_cluster(ctx, cluster, description='transient') # Add event log info cleanup context.ctx().current_instance_info = context.InstanceInfo() @periodic_task.periodic_task(spacing=zombie_task_spacing) @set_context def check_for_zombie_proxy_users(self, ctx): all_users = p.proxy_domain_users_list() users_to_manage = self.hr.get_subset(all_users) for user in users_to_manage: if user.name.startswith('job_'): je_id = user.name[4:] je = conductor.job_execution_get(ctx, je_id) if je is None or (je.info['status'] in edp.JOB_STATUSES_TERMINATED): LOG.debug('Found zombie proxy user {username}'.format( username=user.name)) p.proxy_user_delete(user_id=user.id) @periodic_task.periodic_task(spacing=3600) @set_context def terminate_incomplete_clusters(self, ctx): if CONF.cleanup_time_for_incomplete_clusters <= 0: return LOG.debug('Terminating old clusters in non-final state') # NOTE(alazarev) Retrieving all clusters once in hour for now. # Criteria support need to be implemented in sahara db API to # have SQL filtering. 
all_clusters = [ cluster for cluster in conductor.cluster_get_all(ctx) if (cluster.status not in [ c_u.CLUSTER_STATUS_ACTIVE, c_u.CLUSTER_STATUS_ERROR, c_u.CLUSTER_STATUS_DELETING]) ] clusters_to_manage = self.hr.get_subset(all_clusters) for cluster in clusters_to_manage: spacing = get_time_since_last_update(cluster) if spacing < CONF.cleanup_time_for_incomplete_clusters * 3600: continue terminate_cluster(ctx, cluster, description='incomplete') # Add event log info cleanup context.ctx().current_instance_info = context.InstanceInfo() @periodic_task.periodic_task( spacing=vb.get_verification_periodic_interval()) @set_context def run_verifications(self, ctx): LOG.debug("Executing health checks for the clusters") start_dict = {'verification': {'status': 'START'}} all_clusters = conductor.cluster_get_all( ctx, status=c_u.CLUSTER_STATUS_ACTIVE) clusters_to_manage = self.hr.get_subset(all_clusters) for cluster in clusters_to_manage: try: vb.validate_verification_start(cluster) api.update_cluster(cluster.id, start_dict) except vb.CannotVerifyError: LOG.debug("Skipping running verification " "on the cluster %s", cluster.name) return SaharaPeriodicTasks() def setup(): if CONF.periodic_enable: if CONF.periodic_fuzzy_delay: initial_delay = random.randint(0, CONF.periodic_fuzzy_delay) LOG.debug("Starting periodic tasks with initial delay {seconds} " "seconds".format(seconds=initial_delay)) else: initial_delay = None tg = threadgroup.ThreadGroup() workers_number = (CONF.periodic_workers_number if CONF.periodic_coordinator_backend_url else 1) for t in range(workers_number): pt = _make_periodic_tasks() tg.add_dynamic_timer( pt.run_periodic_tasks, initial_delay=initial_delay, periodic_interval_max=CONF.periodic_interval_max, context=None) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/quotas.py0000664000175000017500000001406100000000000017647 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
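# NOTE (editorial sketch, not part of the original sahara source): the
# terminate_incomplete_clusters periodic task above removes clusters that are
# stuck in a non-final state and have not been updated for more than
# cleanup_time_for_incomplete_clusters hours (a value of 0 disables the
# cleanup). A dependency-free illustration of that age check; the status names
# and argument layout are simplified stand-ins:

import datetime

FINAL_STATES = frozenset(['Active', 'Error', 'Deleting'])

def is_stale(status, updated_at, max_age_hours, now=None):
    """True if the cluster is non-final and older than the cleanup window."""
    if max_age_hours <= 0 or status in FINAL_STATES:
        return False
    now = now or datetime.datetime.utcnow()
    return (now - updated_at).total_seconds() > max_age_hours * 3600

# Example:
#   is_stale('Spawning',
#            datetime.datetime.utcnow() - datetime.timedelta(hours=5), 4)
#   -> True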
from oslo_config import cfg import six from sahara import context from sahara import exceptions as ex from sahara.i18n import _ from sahara.utils.openstack import base as b from sahara.utils.openstack import cinder as cinder_client from sahara.utils.openstack import neutron as neutron_client from sahara.utils.openstack import nova as nova_client CONF = cfg.CONF UNLIMITED = 'unlimited' def _is_unlimited(limit): return limit == -1 def _get_zero_limits(): return { 'ram': 0, 'cpu': 0, 'instances': 0, 'floatingips': 0, 'security_groups': 0, 'security_group_rules': 0, 'ports': 0, 'volumes': 0, 'volume_gbs': 0 } def check_cluster(cluster): req_limits = _get_req_cluster_limits(cluster) _check_limits(req_limits) def check_scaling(cluster, to_be_enlarged, additional): req_limits = _get_req_scaling_limits(cluster, to_be_enlarged, additional) _check_limits(req_limits) def _check_limits(req_limits): limits_name_map = { 'ram': _("RAM"), 'cpu': _("VCPU"), 'instances': _("instance"), 'floatingips': _("floating ip"), 'security_groups': _("security group"), 'security_group_rules': _("security group rule"), 'ports': _("port"), 'volumes': _("volume"), 'volume_gbs': _("volume storage") } avail_limits = _get_avail_limits() for quota, quota_name in six.iteritems(limits_name_map): if avail_limits[quota] != UNLIMITED: if avail_limits[quota] < req_limits[quota]: raise ex.QuotaException(quota_name, req_limits[quota], avail_limits[quota]) def _get_req_cluster_limits(cluster): req_limits = _get_zero_limits() for ng in cluster.node_groups: _update_limits_for_ng(req_limits, ng, ng.count) return req_limits def _get_req_scaling_limits(cluster, to_be_enlarged, additional): ng_id_map = to_be_enlarged.copy() ng_id_map.update(additional) req_limits = _get_zero_limits() for ng in cluster.node_groups: if ng_id_map.get(ng.id): _update_limits_for_ng(req_limits, ng, ng_id_map[ng.id] - ng.count) return req_limits def _update_limits_for_ng(limits, ng, count): sign = lambda x: (1, -1)[x < 0] nova = nova_client.client() limits['instances'] += count flavor = b.execute_with_retries(nova.flavors.get, ng.flavor_id) limits['ram'] += flavor.ram * count limits['cpu'] += flavor.vcpus * count # tmckay-fp this is fine, it will be zero without it if ng.floating_ip_pool: limits['floatingips'] += count if ng.volumes_per_node: limits['volumes'] += ng.volumes_per_node * count limits['volume_gbs'] += ng.volumes_per_node * ng.volumes_size * count if ng.auto_security_group: limits['security_groups'] += sign(count) # NOTE: +3 - all traffic for private network limits['security_group_rules'] += ( (len(ng.open_ports) + 3) * sign(count)) limits['ports'] += count def _get_avail_limits(): limits = _get_zero_limits() limits.update(_get_nova_limits()) limits.update(_get_neutron_limits()) if cinder_client.check_cinder_exists(): limits.update(_get_cinder_limits()) return limits def _sub_limit(total, used): if _is_unlimited(total): return UNLIMITED else: return total - used def _get_nova_limits(): limits = {} nova = nova_client.client() lim = b.execute_with_retries(nova.limits.get).to_dict()['absolute'] limits['ram'] = _sub_limit(lim['maxTotalRAMSize'], lim['totalRAMUsed']) limits['cpu'] = _sub_limit(lim['maxTotalCores'], lim['totalCoresUsed']) limits['instances'] = _sub_limit(lim['maxTotalInstances'], lim['totalInstancesUsed']) return limits def _get_neutron_limits(): limits = {} neutron = neutron_client.client() tenant_id = context.ctx().tenant_id total_lim = b.execute_with_retries(neutron.show_quota, tenant_id)['quota'] # tmckay-fp here we would just get the 
limits all the time usage_fip = b.execute_with_retries( neutron.list_floatingips, tenant_id=tenant_id)['floatingips'] limits['floatingips'] = _sub_limit(total_lim['floatingip'], len(usage_fip)) usage_sg = b.execute_with_retries( neutron.list_security_groups, tenant_id=tenant_id).get( 'security_groups', []) limits['security_groups'] = _sub_limit(total_lim['security_group'], len(usage_sg)) usage_sg_rules = b.execute_with_retries( neutron.list_security_group_rules, tenant_id=tenant_id).get( 'security_group_rules', []) limits['security_group_rules'] = _sub_limit( total_lim['security_group_rule'], len(usage_sg_rules)) usage_ports = b.execute_with_retries( neutron.list_ports, tenant_id=tenant_id)['ports'] limits['ports'] = _sub_limit(total_lim['port'], len(usage_ports)) return limits def _get_cinder_limits(): avail_limits = {} cinder = cinder_client.client() lim = {} for l in b.execute_with_retries(cinder.limits.get).absolute: lim[l.name] = l.value avail_limits['volumes'] = _sub_limit(lim['maxTotalVolumes'], lim['totalVolumesUsed']) avail_limits['volume_gbs'] = _sub_limit(lim['maxTotalVolumeGigabytes'], lim['totalGigabytesUsed']) return avail_limits ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/sessions.py0000664000175000017500000001373300000000000020206 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from keystoneauth1 import session as keystone from oslo_config import cfg from oslo_log import log as logging from sahara import exceptions as ex from sahara.i18n import _ CONF = cfg.CONF LOG = logging.getLogger(__name__) _SESSION_CACHE = None SESSION_TYPE_CINDER = 'cinder' SESSION_TYPE_KEYSTONE = 'keystone' SESSION_TYPE_NEUTRON = 'neutron' SESSION_TYPE_NOVA = 'nova' SESSION_TYPE_GLANCE = 'glance' SESSION_TYPE_INSECURE = 'insecure' SESSION_TYPE_HEAT = 'heat' def cache(): global _SESSION_CACHE if not _SESSION_CACHE: _SESSION_CACHE = SessionCache() return _SESSION_CACHE class SessionCache(object): '''A cache of keystone Session objects When a requested Session is not currently cached, it will be acquired from specific information in this module. Sessions should be referenced by their OpenStack project name and not the service name, this is to allow for multiple service implementations while retaining the ability to generate Session objects. In all cases, the constant values in this module should be used to communicate the session type. ''' def __init__(self): '''create a new SessionCache''' self._sessions = {} self._session_funcs = { SESSION_TYPE_CINDER: self.get_cinder_session, SESSION_TYPE_KEYSTONE: self.get_keystone_session, SESSION_TYPE_NEUTRON: self.get_neutron_session, SESSION_TYPE_NOVA: self.get_nova_session, SESSION_TYPE_GLANCE: self.get_glance_session, SESSION_TYPE_INSECURE: self.get_insecure_session, SESSION_TYPE_HEAT: self.get_heat_session, } def _set_session(self, session_type, session): '''Set the session for a given type. 
:param session_type: the type of session to set. :param session: the session to associate with the type ''' self._sessions[session_type] = session def get_session(self, session_type=SESSION_TYPE_INSECURE): '''Return a Session for the requested type :param session_type: the type of Session to get, if None an insecure session will be returned. :raises SaharaException: if the requested session type is not found. ''' session_function = self._session_funcs.get(session_type) if session_function: return session_function() else: LOG.error('Requesting an unknown session type (type: {type})'. format(type=session_type)) raise ex.SaharaException( _('Session type {type} not recognized'). format(type=session_type)) def get_insecure_session(self): session = self._sessions.get(SESSION_TYPE_INSECURE) if not session: session = keystone.Session(verify=False) self._set_session(SESSION_TYPE_INSECURE, session) return session def get_cinder_session(self): session = self._sessions.get(SESSION_TYPE_CINDER) if not session: if not CONF.cinder.api_insecure: session = keystone.Session( verify=CONF.cinder.ca_file or True) else: session = self.get_insecure_session() self._set_session(SESSION_TYPE_CINDER, session) return session def get_keystone_session(self): session = self._sessions.get(SESSION_TYPE_KEYSTONE) if not session: if not CONF.keystone.api_insecure: session = keystone.Session( verify=CONF.keystone.ca_file or True) else: session = self.get_insecure_session() self._set_session(SESSION_TYPE_KEYSTONE, session) return session def get_neutron_session(self): session = self._sessions.get(SESSION_TYPE_NEUTRON) if not session: if not CONF.neutron.api_insecure: session = keystone.Session( verify=CONF.neutron.ca_file or True) else: session = self.get_insecure_session() self._set_session(SESSION_TYPE_NEUTRON, session) return session def get_nova_session(self): session = self._sessions.get(SESSION_TYPE_NOVA) if not session: if not CONF.nova.api_insecure: session = keystone.Session( verify=CONF.nova.ca_file or True) else: session = self.get_insecure_session() self._set_session(SESSION_TYPE_NOVA, session) return session def get_glance_session(self): session = self._sessions.get(SESSION_TYPE_GLANCE) if not session: if not CONF.glance.api_insecure: session = keystone.Session(verify=CONF.glance.ca_file or True) else: session = self.get_insecure_session() self._set_session(SESSION_TYPE_GLANCE, session) return session def get_heat_session(self): session = self._sessions.get(SESSION_TYPE_HEAT) if not session: if not CONF.heat.api_insecure: session = keystone.Session(verify=CONF.heat.ca_file or True) else: session = self.get_insecure_session() self._set_session(SESSION_TYPE_HEAT, session) return session def token_for_auth(self, auth): return self.get_keystone_session().get_auth_headers(auth).get( 'X-Auth-Token') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/trusts.py0000664000175000017500000001471100000000000017701 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils as json import six from sahara import conductor as c from sahara import context from sahara import exceptions as ex from sahara.i18n import _ from sahara.utils.openstack import keystone conductor = c.API CONF = cfg.CONF LOG = logging.getLogger(__name__) def create_trust(trustor, trustee, role_names, impersonation=True, project_id=None, allow_redelegation=False): '''Create a trust and return it's identifier :param trustor: The user delegating the trust, this is an auth plugin. :param trustee: The user consuming the trust, this is an auth plugin. :param role_names: A list of role names to be assigned. :param impersonation: Should the trustee impersonate trustor, default is True. :param project_id: The project that the trust will be scoped into, default is the trustor's project id. :param allow_redelegation: Allow redelegation parameter for cluster trusts. :returns: A valid trust id. :raises CreationFailed: If the trust cannot be created. ''' if project_id is None: project_id = keystone.project_id_from_auth(trustor) try: trustor_user_id = keystone.user_id_from_auth(trustor) trustee_user_id = keystone.user_id_from_auth(trustee) client = keystone.client_from_auth(trustor) trust = client.trusts.create(trustor_user=trustor_user_id, trustee_user=trustee_user_id, impersonation=impersonation, role_names=role_names, project=project_id, allow_redelegation=allow_redelegation) LOG.debug('Created trust {trust_id}'.format( trust_id=six.text_type(trust.id))) return trust.id except Exception as e: LOG.error('Unable to create trust (reason: {reason})'.format(reason=e)) raise ex.CreationFailed(_('Failed to create trust')) def create_trust_for_cluster(cluster, expires=True): '''Create a trust for a cluster This delegates a trust from the current user to the Sahara admin user based on the current context roles, and then adds the trust identifier to the cluster object. :param expires: The trust will expire if this is set to True. ''' ctx = context.current() cluster = conductor.cluster_get(ctx, cluster) if CONF.use_identity_api_v3 and not cluster.trust_id: trustor = keystone.auth() trustee = keystone.auth_for_admin( project_name=CONF.trustee.project_name) trust_id = create_trust(trustor=trustor, trustee=trustee, role_names=ctx.roles, allow_redelegation=True) conductor.cluster_update(ctx, cluster, {'trust_id': trust_id}) def delete_trust(trustee, trust_id): '''Delete a trust from a trustee :param trustee: The user to delete the trust from, this is an auth plugin. :param trust_id: The identifier of the trust to delete. :raises DeletionFailed: If the trust cannot be deleted. ''' try: client = keystone.client_from_auth(trustee) client.trusts.delete(trust_id) LOG.debug('Deleted trust {trust_id}'.format( trust_id=six.text_type(trust_id))) except Exception as e: LOG.error('Unable to delete trust (reason: {reason})'.format(reason=e)) raise ex.DeletionFailed( _('Failed to delete trust {0}').format(trust_id)) def delete_trust_from_cluster(cluster): '''Delete a trust from a cluster If the cluster has a trust delegated to it, then delete it and set the trust id to None. :param cluster: The cluster to delete the trust from. 
''' ctx = context.current() cluster = conductor.cluster_get(ctx, cluster) if CONF.use_identity_api_v3 and cluster.trust_id: keystone_auth = keystone.auth_for_admin(trust_id=cluster.trust_id) delete_trust(keystone_auth, cluster.trust_id) conductor.cluster_update(ctx, cluster, {'trust_id': None}) def use_os_admin_auth_token(cluster): '''Set the current context to the admin user's trust scoped token This will configure the current context to the admin user's identity with the cluster's tenant. It will also generate an authentication token based on the admin user and a delegated trust associated with the cluster. :param cluster: The cluster to use for tenant and trust identification. ''' ctx = context.current() cluster = conductor.cluster_get(ctx, cluster) if CONF.use_identity_api_v3 and cluster.trust_id: ctx.username = CONF.trustee.username ctx.tenant_id = cluster.tenant_id ctx.auth_plugin = keystone.auth_for_admin( trust_id=cluster.trust_id) ctx.auth_token = context.get_auth_token() ctx.service_catalog = json.dumps( keystone.service_catalog_from_auth(ctx.auth_plugin)) def get_os_admin_auth_plugin(cluster): '''Return an admin auth plugin based on the cluster trust id or project If a trust id is available for the cluster, then it is used to create an auth plugin scoped to the trust. If not, the project name from the current context is used to scope the auth plugin. :param cluster: The id of the cluster to use for trust identification. ''' ctx = context.current() cluster = conductor.cluster_get(ctx, cluster) if CONF.use_identity_api_v3 and cluster.trust_id: return keystone.auth_for_admin(trust_id=cluster.trust_id) return keystone.auth_for_admin(project_name=ctx.tenant_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validation.py0000664000175000017500000001561100000000000020467 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
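# NOTE (editorial sketch, not part of the original sahara source): the
# SessionCache in sahara/service/sessions.py above keeps one keystoneauth1
# Session per OpenStack service and falls back to a single shared "insecure"
# Session (verify=False) whenever TLS verification is disabled for that
# service. A simplified, generic version of the same caching pattern; the
# class name and the make_session factory are illustrative stand-ins:

class SimpleSessionCache(object):
    def __init__(self, make_session):
        # make_session: callable taking a `verify` argument and returning a
        # session-like object (keystoneauth1.session.Session in sahara).
        self._make_session = make_session
        self._sessions = {}

    def get(self, service, verify=True):
        # All "insecure" requests share one session, mirroring
        # get_insecure_session() above.
        key = service if verify else '_insecure'
        if key not in self._sessions:
            self._sessions[key] = self._make_session(verify)
        return self._sessions[key]

# Example with a dummy factory:
#   cache = SimpleSessionCache(lambda verify: {'verify': verify})
#   cache.get('nova') is cache.get('nova')                             -> True
#   cache.get('nova', verify=False) is cache.get('heat', verify=False) -> True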
import functools from oslo_utils import reflection from sahara import exceptions as ex from sahara.i18n import _ from sahara.utils import api as u from sahara.utils import api_validator from sahara.utils import types def _get_path(path): if path: path_string = path[0] for x in path[1:]: path_string += '[%s]' % str(x) return path_string + ': ' return '' def _generate_error(errors): message = [_get_path(list(e.path)) + e.message for e in errors] if message: return ex.SaharaException('\n'.join(message), "VALIDATION_ERROR") def validate_pagination_limit(): request_args = u.get_request_args() if 'limit' in request_args: if types.is_int(request_args['limit']): if not int(request_args['limit']) > 0: raise ex.SaharaException( _("'limit' must be positive integer"), 400) else: raise ex.SaharaException( _("'limit' must be positive integer"), 400) def get_sorting_field(): request_args = u.get_request_args() if 'sort_by' in request_args: sort_by = request_args['sort_by'] if sort_by: sort_by = sort_by[1:] if sort_by[0] == '-' else sort_by return sort_by return None def validate_sorting_clusters(): field = get_sorting_field() if field is None: return if field not in ['id', 'name', 'plugin_name', 'hadoop_version', 'status']: raise ex.SaharaException( _("Unknown field for sorting %s") % field, 400) def validate_sorting_cluster_templates(): field = get_sorting_field() if field is None: return if field not in ['id', 'name', 'plugin_name', 'hadoop_version', 'created_at', 'updated_at']: raise ex.SaharaException( _("Unknown field for sorting %s") % field, 400) def validate_sorting_node_group_templates(): field = get_sorting_field() if field is None: return if field not in ['id', 'name', 'plugin_name', 'hadoop_version', 'created_at', 'updated_at']: raise ex.SaharaException( _("Unknown field for sorting %s") % field, 400) def validate_sorting_job_binaries(): field = get_sorting_field() if field is None: return if field not in ['id', 'name', 'created_at', 'updated_at']: raise ex.SaharaException( _("Unknown field for sorting %s") % field, 400) def validate_sorting_job_binary_internals(): field = get_sorting_field() if field is None: return if field not in ['id', 'name', 'created_at', 'updated_at']: raise ex.SaharaException( _("Unknown field for sorting %s") % field, 400) def validate_sorting_data_sources(): field = get_sorting_field() if field is None: return if field not in ['id', 'name', 'type', 'created_at', 'updated_at']: raise ex.SaharaException( _("Unknown field for sorting %s") % field, 400) def validate_sorting_jobs(): field = get_sorting_field() if field is None: return if field not in ['id', 'name', 'type', 'created_at', 'updated_at']: raise ex.SaharaException( _("Unknown field for sorting %s") % field, 400) def validate_sorting_job_executions(): field = get_sorting_field() if field is None: return if field not in ['id', 'job_template', 'cluster', 'status']: raise ex.SaharaException( _("Unknown field for sorting %s") % field, 400) def validate(schema, *validators): def decorator(func): @functools.wraps(func) def handler(*args, **kwargs): request_data = u.request_data() try: if schema: validator = api_validator.ApiValidator(schema) errors = validator.iter_errors(request_data) error = _generate_error(errors) if error: return u.bad_request(error) if validators: for validator in validators: validator(**kwargs) except ex.SaharaException as e: return u.bad_request(e) except Exception as e: return u.internal_error( 500, "Error occurred during validation", e) return func(*args, **kwargs) return handler return 
decorator def check_exists(get_func, *id_prop, **get_args): def decorator(func): @functools.wraps(func) def handler(*args, **kwargs): if id_prop and not get_args: get_args['id'] = id_prop[0] if 'marker' in id_prop: if 'marker' not in u.get_request_args(): return func(*args, **kwargs) kwargs['marker'] = u.get_request_args()['marker'] get_kwargs = {} for get_arg in get_args: get_kwargs[get_arg] = kwargs[get_args[get_arg]] obj = None try: obj = get_func(**get_kwargs) except Exception as e: cls_name = reflection.get_class_name(e, fully_qualified=False) if 'notfound' not in cls_name.lower(): raise e if obj is None: e = ex.NotFoundException(get_kwargs, _('Object with %s not found')) return u.not_found(e) if 'marker' in kwargs: del(kwargs['marker']) return func(*args, **kwargs) return handler return decorator def validate_request_params(supported_params): def decorator(func): @functools.wraps(func) def handler(*args, **kwargs): pagination_params = ['marker', 'limit', 'sort_by'] func_name = func.__name__ params = u.get_request_args() for param in params.keys(): if (param not in supported_params and param not in pagination_params): return u.invalid_param_error( 400, 'The only valid params for %s are %s and %s' % ( func_name, supported_params, pagination_params)) return func(*args, **kwargs) return handler return decorator ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.745891 sahara-16.0.0/sahara/service/validations/0000775000175000017500000000000000000000000020274 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/__init__.py0000664000175000017500000000000000000000000022373 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/acl.py0000664000175000017500000000436200000000000021412 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
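# NOTE (editorial sketch, not part of the original sahara source):
# validate_request_params above rejects any query parameter that is neither in
# the handler's supported list nor one of the pagination parameters
# (marker, limit, sort_by). A minimal standalone version of that filter; the
# helper name is illustrative:

PAGINATION_PARAMS = ('marker', 'limit', 'sort_by')

def find_unsupported_params(request_args, supported_params):
    """Return request argument names the endpoint does not accept."""
    allowed = set(supported_params) | set(PAGINATION_PARAMS)
    return sorted(set(request_args) - allowed)

# Example:
#   find_unsupported_params({'limit': '10', 'bogus': 'x'}, ['name', 'status'])
#   -> ['bogus']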
import six from sahara import exceptions as ex from sahara.i18n import _ def check_tenant_for_delete(context, object): if object.tenant_id != context.tenant_id: raise ex.DeletionFailed( _("{object} with id '{id}' could not be deleted because " "it wasn't created in this tenant").format( object=type(object).__name__, id=object.id)) def check_tenant_for_update(context, object): if object.tenant_id != context.tenant_id: raise ex.UpdateFailedException( object.id, _("{object} with id '%s' could not be updated because " "it wasn't created in this tenant").format( object=type(object).__name__)) def check_protected_from_delete(object): if object.is_protected: raise ex.DeletionFailed( _("{object} with id '{id}' could not be deleted because " "it's marked as protected").format( object=type(object).__name__, id=object.id)) def check_protected_from_update(object, data): if object.is_protected and data.get('is_protected', True): # Okay, the only thing we can allow here is a change # to 'is_public', so we have to make sure no other values # are changing if 'is_public' in data: obj = object.to_dict() if all(k == 'is_public' or ( k in obj and obj[k] == v) for k, v in six.iteritems(data)): return raise ex.UpdateFailedException( object.id, _("{object} with id '%s' could not be updated " "because it's marked as protected").format( object=type(object).__name__)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/base.py0000664000175000017500000004036000000000000021563 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
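# NOTE (editorial sketch, not part of the original sahara source):
# check_protected_from_update above allows an update to a protected object
# only when the update keeps it protected and the sole field that actually
# changes is 'is_public'; an update that clears the protection flag itself is
# also let through. A dependency-free illustration of that rule over plain
# dicts (function and argument names are illustrative):

def update_allowed_on_protected(current, update):
    """Mirror of the acl.py rule above, for plain dictionaries."""
    if not current.get('is_protected'):
        return True
    if not update.get('is_protected', True):
        # The update removes protection, so it may proceed.
        return True
    if 'is_public' in update:
        # Every other supplied field must keep its current value.
        return all(key == 'is_public' or current.get(key) == value
                   for key, value in update.items())
    return False

# Example:
#   update_allowed_on_protected({'is_protected': True, 'name': 'x'},
#                               {'is_public': True})   -> True
#   update_allowed_on_protected({'is_protected': True, 'name': 'x'},
#                               {'name': 'y'})         -> False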
import collections import novaclient.exceptions as nova_ex from oslo_config import cfg from oslo_utils import uuidutils import six from sahara import conductor as cond from sahara import context import sahara.exceptions as ex from sahara.i18n import _ import sahara.plugins.base as plugin_base from sahara.service.api import v10 as api from sahara.utils import general as g import sahara.utils.openstack.cinder as cinder from sahara.utils.openstack import images as sahara_images import sahara.utils.openstack.neutron as neutron import sahara.utils.openstack.nova as nova CONF = cfg.CONF conductor = cond.API MAX_HOSTNAME_LENGTH = 64 def _get_plugin_configs(plugin_name, hadoop_version, scope=None): pl_confs = {} for config in plugin_base.PLUGINS.get_plugin( plugin_name).get_all_configs(hadoop_version): if pl_confs.get(config.applicable_target): pl_confs[config.applicable_target].append(config.name) else: pl_confs[config.applicable_target] = [config.name] return pl_confs def _check_duplicates(lst, message): invalid = [] lst = collections.Counter(lst) for key, value in six.iteritems(lst): if value > 1: invalid.append(key) if len(invalid) > 0: raise ex.InvalidDataException(message % invalid) # Common validation checks def check_plugin_name_exists(name): if name not in [p.name for p in api.get_plugins()]: raise ex.InvalidReferenceException( _("Sahara doesn't contain plugin with name '%s'") % name) def check_plugin_supports_version(p_name, version): if version not in plugin_base.PLUGINS.get_plugin(p_name).get_versions(): raise ex.InvalidReferenceException( _("Requested plugin '%(name)s' doesn't support version " "'%(version)s'") % {'name': p_name, 'version': version}) def check_plugin_labels(p_name, version): plugin_base.PLUGINS.validate_plugin_labels(p_name, version) def check_image_registered(image_id): if image_id not in ( [i.id for i in sahara_images.image_manager().list_registered()]): raise ex.InvalidReferenceException( _("Requested image '%s' is not registered") % image_id) def check_node_group_configs(plugin_name, hadoop_version, ng_configs, plugin_configs=None): # TODO(aignatov): Should have scope and config type validations pl_confs = plugin_configs or _get_plugin_configs(plugin_name, hadoop_version) for app_target, configs in ng_configs.items(): if app_target not in pl_confs: raise ex.InvalidReferenceException( _("Plugin doesn't contain applicable target '%s'") % app_target) for name, values in configs.items(): if name not in pl_confs[app_target]: raise ex.InvalidReferenceException( _("Plugin's applicable target '%(target)s' doesn't " "contain config with name '%(name)s'") % {'target': app_target, 'name': name}) def check_all_configurations(data): plugin_version = 'hadoop_version' if data.get('plugin_version'): plugin_version = 'plugin_version' pl_confs = _get_plugin_configs(data['plugin_name'], data[plugin_version]) if data.get('cluster_configs'): check_node_group_configs(data['plugin_name'], data[plugin_version], data['cluster_configs'], plugin_configs=pl_confs) if data.get('node_groups'): check_duplicates_node_groups_names(data['node_groups']) for ng in data['node_groups']: check_node_group_basic_fields(data['plugin_name'], data[plugin_version], ng, pl_confs) # NodeGroup related checks def check_node_group_basic_fields(plugin_name, hadoop_version, ng, plugin_configs=None): if ng.get('node_group_template_id'): ng_tmpl_id = ng['node_group_template_id'] check_node_group_template_exists(ng_tmpl_id) ng_tmpl = api.get_node_group_template(ng_tmpl_id).to_wrapped_dict() 
check_node_group_basic_fields(plugin_name, hadoop_version, ng_tmpl['node_group_template'], plugin_configs) if ng.get('node_configs'): check_node_group_configs(plugin_name, hadoop_version, ng['node_configs'], plugin_configs) if ng.get('flavor_id'): check_flavor_exists(ng['flavor_id']) if ng.get('node_processes'): check_node_processes(plugin_name, hadoop_version, ng['node_processes']) if ng.get('image_id'): check_image_registered(ng['image_id']) if ng.get('volumes_per_node'): if not cinder.check_cinder_exists(): raise ex.InvalidReferenceException(_("Cinder is not supported")) if ng.get('volumes_availability_zone'): check_volume_availability_zone_exist( ng['volumes_availability_zone']) if ng.get('volume_type'): check_volume_type_exists(ng['volume_type']) if not ng.get('volumes_size'): raise ex.InvalidReferenceException( _("You must specify a volumes_size parameter")) if ng.get('floating_ip_pool'): check_floatingip_pool_exists(ng['floating_ip_pool']) if ng.get('security_groups'): check_security_groups_exist(ng['security_groups']) if ng.get('availability_zone'): check_availability_zone_exist(ng['availability_zone']) def check_flavor_exists(flavor_id): flavor_list = nova.client().flavors.list() if flavor_id not in [flavor.id for flavor in flavor_list]: raise ex.NotFoundException( flavor_id, _("Requested flavor '%s' not found")) def check_security_groups_exist(security_groups): security_group_list = neutron.client().list_security_groups() allowed_groups = set() for sg in security_group_list['security_groups']: allowed_groups.add(sg['name']) allowed_groups.add(sg['id']) for sg in security_groups: if sg not in allowed_groups: raise ex.NotFoundException( sg, _("Security group '%s' not found")) def check_floatingip_pool_exists(pool_id): network = None network = neutron.get_network(pool_id) if not network: raise ex.NotFoundException(pool_id, _("Floating IP pool %s not found")) def check_node_processes(plugin_name, version, node_processes): _check_duplicates(node_processes, _("Duplicates in node processes have " "been detected: %s")) plugin_processes = [] for process in plugin_base.PLUGINS.get_plugin( plugin_name).get_node_processes(version).values(): plugin_processes += process plugin_processes = set(plugin_processes) invalid_processes = [] for node_process in node_processes: if node_process not in plugin_processes: invalid_processes.append(node_process) if len(invalid_processes) > 0: raise ex.InvalidReferenceException( _("Plugin doesn't support the following node processes: %s") % sorted(invalid_processes)) def check_duplicates_node_groups_names(node_groups): ng_names = [ng['name'] for ng in node_groups] _check_duplicates( ng_names, _("Duplicates in node group names are detected: %s")) def check_availability_zone_exist(az): az_list = nova.client().availability_zones.list(False) az_names = [a.zoneName for a in az_list] if az not in az_names: raise ex.NotFoundException( az, _("Nova availability zone '%s' not found")) def check_volume_availability_zone_exist(az): az_list = cinder.client().availability_zones.list() az_names = [a.zoneName for a in az_list] if az not in az_names: raise ex.NotFoundException( az, _("Cinder availability zone '%s' not found")) def check_volume_type_exists(volume_type): volume_types = cinder.client().volume_types.list() volume_types = list(filter(lambda x: x.name == volume_type, volume_types)) if len(volume_types) == 1 and volume_types[0].name == volume_type: return raise ex.NotFoundException(volume_type, _("Volume type '%s' not found")) # Cluster creation related checks def 
check_cluster_unique_name(cluster_name): if cluster_name in [cluster.name for cluster in api.get_clusters(name=cluster_name)]: raise ex.NameAlreadyExistsException( _("Cluster with name '%s' already exists") % cluster_name) def check_cluster_hostnames_lengths(cluster_name, node_groups): for ng in node_groups: longest_hostname = g.generate_instance_name(cluster_name, ng['name'], ng['count']) longest_hostname += '.' longest_hostname += CONF.node_domain if len(longest_hostname) > MAX_HOSTNAME_LENGTH: raise ex.InvalidDataException( _("Composite hostname %(host)s in provisioned cluster exceeds" " maximum limit %(limit)s characters") % {'host': longest_hostname, 'limit': MAX_HOSTNAME_LENGTH}) def check_keypair_exists(keypair): try: nova.client().keypairs.get(keypair) except nova_ex.NotFound: raise ex.NotFoundException( keypair, _("Requested keypair '%s' not found")) def check_network_exists(net_id): if not neutron.get_network(net_id): raise ex.NotFoundException(net_id, _("Network %s not found")) # Cluster templates related checks def check_cluster_template_unique_name(cluster_tmpl_name): if cluster_tmpl_name in [cluster_tmpl.name for cluster_tmpl in api.get_cluster_templates( name=cluster_tmpl_name)]: raise ex.NameAlreadyExistsException( _("Cluster template with name '%s' already exists") % cluster_tmpl_name) def check_cluster_template_exists(cluster_template_id): if not api.get_cluster_template(id=cluster_template_id): raise ex.NotFoundException( cluster_template_id, _("Cluster template with id '%s' not found")) def check_node_groups_in_cluster_templates(cluster_name, plugin_name, hadoop_version, cluster_template_id): c_t = api.get_cluster_template(id=cluster_template_id) n_groups = c_t.to_wrapped_dict()['cluster_template']['node_groups'] for node_group in n_groups: check_node_group_basic_fields(plugin_name, hadoop_version, node_group) check_cluster_hostnames_lengths(cluster_name, n_groups) # NodeGroup templates related checks def check_node_group_template_unique_name(ng_tmpl_name): if ng_tmpl_name in [ng_tmpl.name for ng_tmpl in api.get_node_group_templates(name=ng_tmpl_name)]: raise ex.NameAlreadyExistsException( _("NodeGroup template with name '%s' already exists") % ng_tmpl_name) def check_node_group_template_exists(ng_tmpl_id): if not api.get_node_group_template(id=ng_tmpl_id): raise ex.NotFoundException( ng_tmpl_id, _("NodeGroup template with id '%s' not found")) def _get_floating_ip_pool(node_group): # tmckay-fp I think this would be the method we needed to # get floating_ip_pool for instances if node_group.get('floating_ip_pool'): return node_group['floating_ip_pool'] if node_group.get('node_group_template_id'): ctx = context.ctx() ngt = conductor.node_group_template_get( ctx, node_group['node_group_template_id']) if ngt.get('floating_ip_pool'): return ngt['floating_ip_pool'] return None # Cluster scaling def check_resize(cluster, r_node_groups): ng_map = {} for ng in cluster.node_groups: ng_map[ng.name] = ng check_duplicates_node_groups_names(r_node_groups) for ng in r_node_groups: if ng['name'] not in ng_map.keys(): raise ex.InvalidReferenceException( _("Cluster doesn't contain node group with name '%s'") % ng['name']) node_group = ng_map[ng['name']] if node_group.get('node_group_template_id'): ng_tmpl_id = node_group['node_group_template_id'] check_node_group_template_exists(ng_tmpl_id) ng_tmp = api.get_node_group_template(ng_tmpl_id).to_wrapped_dict() check_node_group_basic_fields(cluster.plugin_name, cluster.hadoop_version, ng_tmp['node_group_template']) for scaling_ng in 
r_node_groups: current_count = ng_map[scaling_ng['name']].count new_count = scaling_ng['count'] count_diff = current_count - new_count if 'instances' in scaling_ng: if len(scaling_ng['instances']) > count_diff: raise ex.InvalidDataException( _("Number of specific instances (%(instance)s) to" " delete can not be greater than the count difference" " (%(count)s during scaling") % {'instance': str(len(scaling_ng['instances'])), 'count': str(count_diff)}) else: if len(scaling_ng['instances']) > 0: is_uuid = uuidutils.is_uuid_like( scaling_ng['instances'][0]) if is_uuid: for instance in scaling_ng['instances']: if not uuidutils.is_uuid_like(instance): raise ex.InvalidReferenceException( _("You can only reference instances by" " Name or UUID, not both on the same" " request")) else: for instance in scaling_ng['instances']: if uuidutils.is_uuid_like(instance): raise ex.InvalidReferenceException( _("You can only reference instances by" " Name or UUID, not both on the same" " request")) _check_duplicates(scaling_ng['instances'], _("Duplicate entry for instances to" " delete")) def check_add_node_groups(cluster, add_node_groups): cluster_ng_names = [ng.name for ng in cluster.node_groups] check_duplicates_node_groups_names(add_node_groups) pl_confs = _get_plugin_configs(cluster.plugin_name, cluster.hadoop_version) for ng in add_node_groups: if ng['name'] in cluster_ng_names: raise ex.InvalidReferenceException( _("Can't add new nodegroup. Cluster already has nodegroup with" " name '%s'") % ng['name']) check_node_group_basic_fields(cluster.plugin_name, cluster.hadoop_version, ng, pl_confs) # Tags def check_required_image_tags(plugin_name, hadoop_version, image_id): image = api.get_image(id=image_id) plugin = plugin_base.PLUGINS.get_plugin(plugin_name) req_tags = set(plugin.get_required_image_tags(hadoop_version)) req_tags = list(req_tags.difference(set(image.tags))) if req_tags: raise ex.InvalidReferenceException( _("Requested image '%(image)s' doesn't contain required" " tags: %(tags)s") % {'image': image_id, 'tags': req_tags}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/cluster_template_schema.py0000664000175000017500000000775700000000000025562 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
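# NOTE (editorial sketch, not part of the original sahara source): the
# check_resize logic above requires that, when specific instances are named
# for removal, they are referenced either all by name or all by UUID, never a
# mix, and that no reference is duplicated. A small standalone check built on
# the same oslo_utils helper the module uses (the function name and the
# exception type here are illustrative):

import collections

from oslo_utils import uuidutils

def check_instance_references(instance_refs):
    """Raise ValueError for mixed name/UUID references or duplicates."""
    if not instance_refs:
        return
    expect_uuid = uuidutils.is_uuid_like(instance_refs[0])
    for ref in instance_refs:
        if uuidutils.is_uuid_like(ref) != expect_uuid:
            raise ValueError("You can only reference instances by Name or "
                             "UUID, not both on the same request")
    duplicates = [ref for ref, count in
                  collections.Counter(instance_refs).items() if count > 1]
    if duplicates:
        raise ValueError("Duplicate entry for instances to delete: %s"
                         % duplicates)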
import copy from sahara.service.validations import node_group_template_schema as ngt_schema from sahara.service.validations import shares def _build_ng_schema_for_cluster_tmpl(): cl_tmpl_ng_schema = copy.deepcopy(ngt_schema.NODE_GROUP_TEMPLATE_SCHEMA) cl_tmpl_ng_schema["properties"].update({"count": {"type": "integer"}}) cl_tmpl_ng_schema["required"] = ["name", "flavor_id", "node_processes", "count"] del cl_tmpl_ng_schema["properties"]["plugin_name"] del cl_tmpl_ng_schema["properties"]["hadoop_version"] return cl_tmpl_ng_schema _cluster_tmpl_ng_schema = _build_ng_schema_for_cluster_tmpl() def _build_ng_tmpl_schema_for_cluster_template(): cl_tmpl_ng_tmpl_schema = copy.deepcopy(_cluster_tmpl_ng_schema) cl_tmpl_ng_tmpl_schema["properties"].update( { "node_group_template_id": { "type": "string", "format": "uuid", } }) cl_tmpl_ng_tmpl_schema["required"] = ["node_group_template_id", "name", "count"] return cl_tmpl_ng_tmpl_schema _cluster_tmpl_ng_tmpl_schema = _build_ng_tmpl_schema_for_cluster_template() CLUSTER_TEMPLATE_SCHEMA = { "type": "object", "properties": { "name": { "type": "string", "minLength": 1, "maxLength": 50, "format": "valid_name_hostname", }, "plugin_name": { "type": "string", }, "hadoop_version": { "type": "string", }, "default_image_id": { "type": ["string", "null"], "format": "uuid", }, "cluster_configs": { "type": ["configs", "null"], }, "node_groups": { "type": ["array", "null"], "items": { "oneOf": [_cluster_tmpl_ng_tmpl_schema, _cluster_tmpl_ng_schema] } }, "anti_affinity": { "type": ["array", "null"], "items": { "type": "string", }, }, "description": { "type": ["string", "null"], }, "neutron_management_network": { "type": ["string", "null"], "format": "uuid" }, "shares": copy.deepcopy(shares.SHARE_SCHEMA), "use_autoconfig": { "type": ["boolean", "null"], }, "is_public": { "type": ["boolean", "null"], }, "is_protected": { "type": ["boolean", "null"], }, "domain_name": { "type": ["string", "null"], } }, "additionalProperties": False, "required": [ "name", "plugin_name", "hadoop_version", ] } # APIv2: renaming hadoop_version -> plugin_version CLUSTER_TEMPLATE_SCHEMA_V2 = copy.deepcopy(CLUSTER_TEMPLATE_SCHEMA) del CLUSTER_TEMPLATE_SCHEMA_V2["properties"]["hadoop_version"] CLUSTER_TEMPLATE_SCHEMA_V2["required"].remove("hadoop_version") CLUSTER_TEMPLATE_SCHEMA_V2["properties"].update({ "plugin_version": { "type": "string", }}) CLUSTER_TEMPLATE_SCHEMA_V2["required"].append("plugin_version") CLUSTER_TEMPLATE_UPDATE_SCHEMA = copy.copy(CLUSTER_TEMPLATE_SCHEMA) CLUSTER_TEMPLATE_UPDATE_SCHEMA["required"] = [] CLUSTER_TEMPLATE_UPDATE_SCHEMA_V2 = copy.copy(CLUSTER_TEMPLATE_SCHEMA_V2) CLUSTER_TEMPLATE_UPDATE_SCHEMA_V2["required"] = [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/cluster_templates.py0000664000175000017500000000721500000000000024412 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
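# NOTE (editorial sketch, not part of the original sahara source): the APIv2
# schema above is derived from the v1 schema by deep-copying it and swapping
# the required 'hadoop_version' property for 'plugin_version'. A tiny
# standalone helper showing the same copy-then-rename pattern (the helper
# itself is illustrative, not part of sahara):

import copy

def rename_required_property(schema, old_name, new_name):
    """Return a copy of `schema` with a required property renamed."""
    new_schema = copy.deepcopy(schema)
    new_schema['properties'][new_name] = (
        new_schema['properties'].pop(old_name))
    new_schema['required'] = [new_name if field == old_name else field
                              for field in new_schema['required']]
    return new_schema

# Example:
#   v1 = {'properties': {'hadoop_version': {'type': 'string'}},
#         'required': ['hadoop_version']}
#   rename_required_property(v1, 'hadoop_version',
#                            'plugin_version')['required']
#   -> ['plugin_version']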
from sahara import exceptions as ex from sahara.i18n import _ from sahara.service.api import v10 as api import sahara.service.validations.base as b from sahara.service.validations import shares def check_cluster_template_create(data, **kwargs): plugin_version = 'hadoop_version' if data.get('plugin_version'): plugin_version = 'plugin_version' b.check_cluster_template_unique_name(data['name']) b.check_plugin_name_exists(data['plugin_name']) b.check_plugin_supports_version(data['plugin_name'], data[plugin_version]) if data.get('default_image_id'): b.check_image_registered(data['default_image_id']) b.check_required_image_tags(data['plugin_name'], data[plugin_version], data['default_image_id']) b.check_all_configurations(data) if data.get('anti_affinity'): b.check_node_processes(data['plugin_name'], data[plugin_version], data['anti_affinity']) if data.get('neutron_management_network'): b.check_network_exists(data['neutron_management_network']) if data.get('shares'): shares.check_shares(data['shares']) def check_cluster_template_usage(cluster_template_id, **kwargs): users = [] for cluster in api.get_clusters(): if cluster_template_id == cluster.cluster_template_id: users.append(cluster.name) if users: raise ex.InvalidReferenceException( _("Cluster template %(id)s in use by %(clusters)s") % {'id': cluster_template_id, 'clusters': ', '.join(users)}) def check_cluster_template_update(cluster_template_id, data, **kwargs): plugin_version = 'hadoop_version' if data.get('plugin_version'): plugin_version = 'plugin_version' if data.get('plugin_name') and not data.get(plugin_version): raise ex.InvalidReferenceException( _("You must specify a %s value " "for your plugin_name") % plugin_version) if data.get('plugin_name'): plugin = data['plugin_name'] version = data[plugin_version] b.check_plugin_name_exists(plugin) b.check_plugin_supports_version(plugin, version) b.check_all_configurations(data) else: cluster_template = api.get_cluster_template(cluster_template_id) plugin = cluster_template.plugin_name if data.get(plugin_version): version = data.get(plugin_version) b.check_plugin_supports_version(plugin, version) else: version = cluster_template.hadoop_version if data.get('default_image_id'): b.check_image_registered(data['default_image_id']) b.check_required_image_tags(plugin, version, data['default_image_id']) if data.get('anti_affinity'): b.check_node_processes(plugin, version, data['anti_affinity']) if data.get('neutron_management_network'): b.check_network_exists(data['neutron_management_network']) if data.get('shares'): shares.check_shares(data['shares']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/clusters.py0000664000175000017500000001054700000000000022521 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
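# NOTE (editorial sketch, not part of the original sahara source):
# check_cluster_template_usage above refuses to touch a cluster template that
# is still referenced by existing clusters and reports those clusters by name.
# A dependency-free illustration of that usage check, with clusters as plain
# dicts (names are illustrative):

def clusters_using_template(template_id, clusters):
    """Return the names of clusters still referencing the template."""
    return [cluster['name'] for cluster in clusters
            if cluster.get('cluster_template_id') == template_id]

# Example:
#   clusters_using_template(
#       'tmpl-1',
#       [{'name': 'a', 'cluster_template_id': 'tmpl-1'},
#        {'name': 'b', 'cluster_template_id': 'tmpl-2'}])
#   -> ['a']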
from oslo_config import cfg from sahara import context import sahara.exceptions as ex from sahara.i18n import _ from sahara.service.api import v10 as api from sahara.service.health import verification_base from sahara.service.validations import acl import sahara.service.validations.base as b CONF = cfg.CONF def check_cluster_create(data, **kwargs): b.check_cluster_unique_name(data['name']) _check_cluster_create(data) def check_multiple_clusters_create(data, **kwargs): _check_cluster_create(data) for counter in range(data['count']): cluster_name = api.get_multiple_cluster_name(data['count'], data['name'], counter + 1) b.check_cluster_unique_name(cluster_name) def check_one_or_multiple_clusters_create(data, **kwargs): if data.get('count', None) is not None: check_multiple_clusters_create(data, **kwargs) else: check_cluster_create(data, **kwargs) def _check_cluster_create(data): plugin_version = 'hadoop_version' if data.get('plugin_version'): plugin_version = 'plugin_version' b.check_plugin_name_exists(data['plugin_name']) b.check_plugin_supports_version(data['plugin_name'], data[plugin_version]) b.check_plugin_labels( data['plugin_name'], data[plugin_version]) if data.get('cluster_template_id'): ct_id = data['cluster_template_id'] b.check_cluster_template_exists(ct_id) if not data.get('node_groups'): b.check_node_groups_in_cluster_templates(data['name'], data['plugin_name'], data[plugin_version], ct_id) if data.get('user_keypair_id'): b.check_keypair_exists(data['user_keypair_id']) default_image_id = _get_cluster_field(data, 'default_image_id') if default_image_id: b.check_image_registered(default_image_id) b.check_required_image_tags(data['plugin_name'], data[plugin_version], default_image_id) else: raise ex.NotFoundException('default_image_id', _("'%s' field is not found")) b.check_all_configurations(data) if data.get('anti_affinity'): b.check_node_processes(data['plugin_name'], data[plugin_version], data['anti_affinity']) if data.get('node_groups'): b.check_cluster_hostnames_lengths(data['name'], data['node_groups']) neutron_net_id = _get_cluster_field(data, 'neutron_management_network') if neutron_net_id: b.check_network_exists(neutron_net_id) else: raise ex.NotFoundException('neutron_management_network', _("'%s' field is not found")) def _get_cluster_field(cluster, field): if cluster.get(field): return cluster[field] if cluster.get('cluster_template_id'): cluster_template = api.get_cluster_template( id=cluster['cluster_template_id']) if cluster_template.get(field): return cluster_template[field] return None def check_cluster_delete(cluster_id, **kwargs): cluster = api.get_cluster(cluster_id) acl.check_tenant_for_delete(context.current(), cluster) acl.check_protected_from_delete(cluster) def check_cluster_update(cluster_id, data, **kwargs): cluster = api.get_cluster(cluster_id) verification = verification_base.validate_verification_ops( cluster, data) acl.check_tenant_for_update(context.current(), cluster) if not verification: acl.check_protected_from_update(cluster, data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/clusters_scaling.py0000664000175000017500000000655000000000000024220 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
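# _get_cluster_field() in clusters.py above resolves a field from the request
# body first and only then falls back to the referenced cluster template. A
# standalone sketch of that fallback, with a plain dict standing in for the
# real api.get_cluster_template() lookup (ids and values here are hypothetical):
TEMPLATES = {
    "ct-1": {"default_image_id": "img-123",
             "neutron_management_network": "net-456"},
}

def get_cluster_field(cluster, field, templates=TEMPLATES):
    """Return a field from the request body, else from its cluster template."""
    if cluster.get(field):
        return cluster[field]
    template_id = cluster.get('cluster_template_id')
    if template_id:
        template = templates.get(template_id, {})
        if template.get(field):
            return template[field]
    return None

# A value given directly in the request wins ...
assert get_cluster_field({"default_image_id": "img-999"},
                         "default_image_id") == "img-999"
# ... otherwise it is inherited from the template.
assert get_cluster_field({"cluster_template_id": "ct-1"},
                         "default_image_id") == "img-123"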
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara import context import sahara.exceptions as ex from sahara.i18n import _ import sahara.plugins.base as plugin_base from sahara.service import api as service_api from sahara.service.api import v10 as api from sahara.service.validations import acl import sahara.service.validations.base as b from sahara.utils import cluster as c_u def check_cluster_scaling(data, cluster_id, **kwargs): ctx = context.current() cluster = api.get_cluster(id=cluster_id) if cluster is None: raise ex.NotFoundException( {'id': cluster_id}, _('Object with %s not found')) b.check_plugin_labels( cluster.plugin_name, cluster.hadoop_version) acl.check_tenant_for_update(ctx, cluster) acl.check_protected_from_update(cluster, data) cluster_engine = cluster.sahara_info.get( 'infrastructure_engine') if cluster.sahara_info else None engine_type_and_version = service_api.OPS.get_engine_type_and_version() if (not cluster_engine and not engine_type_and_version.startswith('direct')): raise ex.InvalidReferenceException( _("Cluster created before Juno release " "can't be scaled with %(engine)s engine") % {"engine": engine_type_and_version}) if (cluster.sahara_info and cluster_engine != engine_type_and_version): raise ex.InvalidReferenceException( _("Cluster created with %(old_engine)s infrastructure engine " "can't be scaled with %(new_engine)s engine") % {"old_engine": cluster.sahara_info.get('infrastructure_engine'), "new_engine": engine_type_and_version}) if not (plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name, 'scale_cluster') and ( plugin_base.PLUGINS.is_plugin_implements(cluster.plugin_name, 'decommission_nodes'))): raise ex.InvalidReferenceException( _("Requested plugin '%s' doesn't support cluster scaling feature") % cluster.plugin_name) if cluster.status != c_u.CLUSTER_STATUS_ACTIVE: raise ex.InvalidReferenceException( _("Cluster cannot be scaled not in 'Active' status. " "Cluster status: %s") % cluster.status) if cluster.user_keypair_id: b.check_keypair_exists(cluster.user_keypair_id) if cluster.default_image_id: b.check_image_registered(cluster.default_image_id) if data.get("resize_node_groups"): b.check_resize(cluster, data['resize_node_groups']) if data.get("add_node_groups"): b.check_add_node_groups(cluster, data['add_node_groups']) b.check_cluster_hostnames_lengths(cluster.name, data['add_node_groups']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/clusters_schema.py0000664000175000017500000001065400000000000024040 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
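# check_cluster_scaling() above refuses to scale unless the plugin implements
# both scale_cluster and decommission_nodes. A rough sketch of that capability
# gate using duck typing on a hypothetical plugin object instead of Sahara's
# real plugin_base.PLUGINS.is_plugin_implements() registry:
class FakePlugin(object):
    """Hypothetical plugin exposing only part of the scaling interface."""
    def scale_cluster(self, cluster, instances):
        pass
    # no decommission_nodes(), so shrinking node groups would be impossible

def supports_scaling(plugin):
    # Both operations must be present, mirroring the check above.
    return all(callable(getattr(plugin, op, None))
               for op in ("scale_cluster", "decommission_nodes"))

# The incomplete plugin is rejected for scaling.
assert not supports_scaling(FakePlugin())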
# See the License for the specific language governing permissions and # limitations under the License. import copy import sahara.exceptions as ex from sahara.service.health import verification_base import sahara.service.validations.cluster_template_schema as ct_schema from sahara.service.validations import shares def _build_node_groups_schema(): schema = copy.deepcopy(ct_schema.CLUSTER_TEMPLATE_SCHEMA) return schema['properties']['node_groups'] def _build_cluster_schema(api_version='v1'): if api_version == 'v1': cluster_schema = copy.deepcopy(ct_schema.CLUSTER_TEMPLATE_SCHEMA) elif api_version == 'v2': cluster_schema = copy.deepcopy(ct_schema.CLUSTER_TEMPLATE_SCHEMA_V2) else: raise ex.InvalidDataException('Invalid API version %s' % api_version) cluster_schema['properties'].update({ "is_transient": { "type": "boolean" }, "user_keypair_id": { "type": "string", "format": "valid_keypair_name", }, "cluster_template_id": { "type": "string", "format": "uuid", }}) if api_version == 'v2': cluster_schema['properties'].update({ "count": { "type": "integer" }}) return cluster_schema CLUSTER_SCHEMA = _build_cluster_schema() CLUSTER_SCHEMA_V2 = _build_cluster_schema('v2') MULTIPLE_CLUSTER_SCHEMA = copy.deepcopy(CLUSTER_SCHEMA) MULTIPLE_CLUSTER_SCHEMA['properties'].update({ "count": { "type": "integer" }}) MULTIPLE_CLUSTER_SCHEMA['required'].append('count') CLUSTER_UPDATE_SCHEMA = { "type": "object", "properties": { "description": { "type": ["string", "null"] }, "name": { "type": "string", "minLength": 1, "maxLength": 50, "format": "valid_name_hostname", }, "is_public": { "type": ["boolean", "null"], }, "is_protected": { "type": ["boolean", "null"], }, "verification": { "type": "object", "properties": { "status": { "enum": verification_base.get_possible_ops(), } }, }, "shares": copy.deepcopy(shares.SHARE_SCHEMA), }, "additionalProperties": False, "required": [] } CLUSTER_UPDATE_SCHEMA_V2 = copy.deepcopy(CLUSTER_UPDATE_SCHEMA) CLUSTER_UPDATE_SCHEMA_V2['properties'].update({ "update_keypair": { "type": ["boolean", "null"] }}) CLUSTER_SCALING_SCHEMA = { "type": "object", "properties": { "resize_node_groups": { "type": "array", "items": { "type": "object", "properties": { "name": { "type": "string", }, "count": { "type": "integer", "minimum": 0, }, }, "additionalProperties": False, "required": [ "name", "count", ] }, "minItems": 1 }, "add_node_groups": _build_node_groups_schema(), }, "additionalProperties": False, "anyOf": [ { "required": ["resize_node_groups"] }, { "required": ["add_node_groups"] } ] } CLUSTER_SCALING_SCHEMA_V2 = copy.deepcopy(CLUSTER_SCALING_SCHEMA) CLUSTER_SCALING_SCHEMA_V2['properties']['resize_node_groups'][ 'items']['properties'].update( { "instances": { "type": "array", "items": { "type": "string", }, } }) CLUSTER_DELETE_SCHEMA_V2 = { "type": "object", "properties": { "force": { "type": "boolean" } }, "additionalProperties": False, "required": [] } ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.749891 sahara-16.0.0/sahara/service/validations/edp/0000775000175000017500000000000000000000000021044 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/edp/__init__.py0000664000175000017500000000000000000000000023143 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 
sahara-16.0.0/sahara/service/validations/edp/base.py0000664000175000017500000000557400000000000022343 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Cluster creation related checks""" from sahara import conductor as c from sahara import context from sahara import exceptions as ex from sahara.i18n import _ conductor = c.API data_source_type = { "type": "string", "enum": ["swift", "hdfs", "maprfs", "manila", "s3"] } job_configs = { "type": "object", "properties": { "configs": { "type": "simple_config", }, "params": { "type": "simple_config", }, "args": { "type": "array", "items": { "type": "string" } }, "job_execution_info": { "type": "simple_config", } }, "additionalProperties": False, } def check_data_source_unique_name(ds_name): if ds_name in [ds.name for ds in conductor.data_source_get_all(context.ctx(), name=ds_name)]: raise ex.NameAlreadyExistsException( _("Data source with name '%s' " "already exists") % ds_name) def check_data_source_exists(data_source_id): if not conductor.data_source_get(context.ctx(), data_source_id): raise ex.InvalidReferenceException( _("DataSource with id '%s' doesn't exist") % data_source_id) def check_job_unique_name(job_name): if job_name in [job.name for job in conductor.job_get_all(context.ctx(), name=job_name)]: raise ex.NameAlreadyExistsException(_("Job with name '%s' " "already exists") % job_name) def check_job_binary_internal_exists(jbi_id): if not conductor.job_binary_internal_get(context.ctx(), jbi_id): raise ex.InvalidReferenceException( _("JobBinaryInternal with id '%s' doesn't exist") % jbi_id) def check_data_sources_are_different(data_source_1_id, data_source_2_id): ds1 = conductor.data_source_get(context.ctx(), data_source_1_id) ds2 = conductor.data_source_get(context.ctx(), data_source_2_id) if ds1.type == ds2.type and ds1.url == ds2.url: raise ex.InvalidDataException(_('Provided input and output ' 'DataSources reference the same ' 'location: %s') % ds1.url) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/edp/data_source.py0000664000175000017500000000515400000000000023714 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
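# CLUSTER_SCALING_SCHEMA in clusters_schema.py above is plain JSON Schema apart
# from the node-group subschema, so a trimmed copy (resize_node_groups only)
# can be exercised directly with the third-party jsonschema package, assuming
# it is installed. Sahara itself validates through its own api_validator
# wrapper, so this is only a sketch of the rules, not the real entry point.
import jsonschema

RESIZE_ONLY_SCHEMA = {
    "type": "object",
    "properties": {
        "resize_node_groups": {
            "type": "array",
            "minItems": 1,
            "items": {
                "type": "object",
                "properties": {
                    "name": {"type": "string"},
                    "count": {"type": "integer", "minimum": 0},
                },
                "additionalProperties": False,
                "required": ["name", "count"],
            },
        },
    },
    "additionalProperties": False,
    "required": ["resize_node_groups"],
}

# A valid scale request passes silently ...
jsonschema.validate({"resize_node_groups": [{"name": "workers", "count": 2}]},
                    RESIZE_ONLY_SCHEMA)
# ... while a negative count violates "minimum": 0.
try:
    jsonschema.validate({"resize_node_groups": [{"name": "workers",
                                                 "count": -1}]},
                        RESIZE_ONLY_SCHEMA)
except jsonschema.ValidationError as err:
    print("rejected: %s" % err.message)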
import re from oslo_config import cfg from sahara import conductor as c from sahara import context import sahara.exceptions as ex from sahara.i18n import _ import sahara.service.edp.data_sources.manager as ds_manager import sahara.service.validations.edp.base as b CONF = cfg.CONF def check_data_source_create(data, **kwargs): b.check_data_source_unique_name(data['name']) _check_data_source(data) def _check_datasource_placeholder(url): if url is None: return total_length = 0 substrings = re.findall(r"%RANDSTR\(([\-]?\d+)\)%", url) for length in map(int, substrings): if length <= 0: raise ex.InvalidDataException(_("Requested RANDSTR length" " must be positive.")) total_length += length if total_length > 1024: raise ex.InvalidDataException(_("Requested RANDSTR length is" " too long, please choose a " "value less than 1024.")) def _check_data_source(data): _check_datasource_placeholder(data["url"]) if data["type"] in CONF.data_source_types: ds_manager.DATA_SOURCES.get_data_source(data["type"]).validate(data) def check_data_source_update(data, data_source_id): ctx = context.ctx() jobs = c.API.job_execution_get_all(ctx) pending_jobs = [job for job in jobs if job.info["status"] == "PENDING"] for job in pending_jobs: if data_source_id in job.data_source_urls: raise ex.UpdateFailedException( _("DataSource is used in a " "PENDING Job and can not be updated.")) ds = c.API.data_source_get(ctx, data_source_id) if 'name' in data and data['name'] != ds.name: b.check_data_source_unique_name(data['name']) check_data = {'type': data.get('type', None) or ds.type, 'url': data.get('url', None) or ds.url, 'credentials': data.get( 'credentials', None) or ds.credentials} _check_data_source(check_data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/edp/data_source_schema.py0000664000175000017500000000301000000000000025221 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import sahara.service.validations.edp.base as b DATA_SOURCE_SCHEMA = { "type": "object", "properties": { "name": { "type": "string", "minLength": 1, "maxLength": 50, "format": "valid_name" }, "description": { "type": "string" }, "type": b.data_source_type, "url": { "type": "string", }, "credentials": { "type": "object" }, "is_public": { "type": ["boolean", "null"], }, "is_protected": { "type": ["boolean", "null"], } }, "additionalProperties": False, "required": [ "name", "type", "url" ] } # For an update we do not require any fields but we want the given # fields to be validated DATA_SOURCE_UPDATE_SCHEMA = copy.copy(DATA_SOURCE_SCHEMA) DATA_SOURCE_UPDATE_SCHEMA["required"] = [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/edp/job.py0000664000175000017500000000514400000000000022174 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. 
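# _check_datasource_placeholder() in data_source.py above caps the total length
# requested via %RANDSTR(n)% placeholders in a data source URL. The same
# regex-based check, rewritten as a standalone function with plain ValueError
# in place of Sahara's exception classes:
import re

def check_randstr_placeholders(url, max_total=1024):
    if url is None:
        return
    total_length = 0
    for length in map(int, re.findall(r"%RANDSTR\((-?\d+)\)%", url)):
        if length <= 0:
            raise ValueError("Requested RANDSTR length must be positive.")
        total_length += length
    if total_length > max_total:
        raise ValueError("Requested RANDSTR length is too long, please "
                         "choose a value less than %d." % max_total)

check_randstr_placeholders("swift://bucket/out-%RANDSTR(12)%")   # accepted
try:
    check_randstr_placeholders("hdfs:///tmp/%RANDSTR(-5)%")      # rejected
except ValueError as err:
    print(err)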
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import sahara.exceptions as e from sahara.i18n import _ from sahara.service.api import v11 as api from sahara.service.validations.edp import job_interface as j_i from sahara.utils import edp def _check_binaries(values): for job_binary in values: if not api.get_job_binary(job_binary): raise e.NotFoundException(job_binary, _("Job binary '%s' does not exist")) def check_mains_libs(data, **kwargs): mains = data.get("mains", []) libs = data.get("libs", []) job_type, subtype = edp.split_job_type(data.get("type")) streaming = (job_type == edp.JOB_TYPE_MAPREDUCE and subtype == edp.JOB_SUBTYPE_STREAMING) # These types must have a value in mains and may also use libs if job_type in [edp.JOB_TYPE_PIG, edp.JOB_TYPE_HIVE, edp.JOB_TYPE_SHELL, edp.JOB_TYPE_SPARK, edp.JOB_TYPE_STORM, edp.JOB_TYPE_PYLEUS]: if not mains: if job_type in [edp.JOB_TYPE_SPARK, edp.JOB_TYPE_STORM, edp.JOB_TYPE_PYLEUS]: msg = _( "%s job requires main application jar") % data.get("type") else: msg = _("%s flow requires main script") % data.get("type") raise e.InvalidDataException(msg) # Check for overlap if set(mains).intersection(set(libs)): raise e.InvalidDataException(_("'mains' and 'libs' overlap")) else: # Java and MapReduce require libs, but MapReduce.Streaming does not if not streaming and not libs: raise e.InvalidDataException(_("%s flow requires libs") % data.get("type")) if mains: raise e.InvalidDataException(_("%s flow does not use mains") % data.get("type")) # Make sure that all referenced binaries exist _check_binaries(mains) _check_binaries(libs) def check_interface(data, **kwargs): j_i.check_job_interface(data, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/edp/job_binary.py0000664000175000017500000000172000000000000023534 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_log import log as logging import sahara.service.edp.job_binaries.manager as jb_manager CONF = cfg.CONF LOG = logging.getLogger(__name__) def check_job_binary(data, **kwargs): job_binary_url = data.get("url", None) if job_binary_url: jb_manager.JOB_BINARIES.get_job_binary_by_url(job_binary_url). 
\ validate(data, **kwargs) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/edp/job_binary_internal.py0000664000175000017500000000267300000000000025440 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg import six import sahara.exceptions as e from sahara.i18n import _ from sahara.service.edp.job_binaries import manager as jb_manager from sahara.utils import api_validator as a CONF = cfg.CONF def is_internal_db_enabled(): return 'internal-db' in jb_manager.JOB_BINARIES.get_job_binaries() def check_job_binary_internal(data, **kwargs): if not is_internal_db_enabled(): raise e.BadJobBinaryInternalException( _("Sahara internal db is disabled for storing job binaries.")) if not (isinstance(data, six.binary_type) and len(data) > 0): raise e.BadJobBinaryInternalException() if "name" in kwargs: name = kwargs["name"] if not a.validate_name_format(name): raise e.BadJobBinaryInternalException(_("%s is not a valid name") % name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/edp/job_binary_internal_schema.py0000664000175000017500000000177500000000000026762 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. JOB_BINARY_UPDATE_SCHEMA = { "type": "object", "properties": { "name": { "type": "string", "minLength": 1, "maxLength": 50, "format": "valid_name" }, "is_public": { "type": ["boolean", "null"], }, "is_protected": { "type": ["boolean", "null"], } }, "additionalProperties": False, "required": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/edp/job_binary_schema.py0000664000175000017500000000275600000000000025066 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
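# check_mains_libs() in edp/job.py above applies different mains/libs rules per
# EDP job type. A condensed standalone sketch of that decision; the job-type
# strings below are inlined for illustration only (Sahara keeps the canonical
# constants in sahara.utils.edp), and ValueError replaces InvalidDataException.
MAIN_BASED_TYPES = {"Pig", "Hive", "Shell", "Spark", "Storm", "Pyleus"}

def check_mains_libs(job_type, subtype, mains, libs):
    streaming = (job_type == "MapReduce" and subtype == "Streaming")
    if job_type in MAIN_BASED_TYPES:
        # These types must have a value in mains and may also use libs.
        if not mains:
            raise ValueError("%s job requires a main script or jar" % job_type)
        if set(mains) & set(libs):
            raise ValueError("'mains' and 'libs' overlap")
    else:
        # Java and MapReduce rely on libs; MapReduce.Streaming does not.
        if not streaming and not libs:
            raise ValueError("%s job requires libs" % job_type)
        if mains:
            raise ValueError("%s job does not use mains" % job_type)

check_mains_libs("Spark", None, ["app.jar"], [])     # ok: main-based type
check_mains_libs("MapReduce", "Streaming", [], [])   # ok: streaming needs neither
try:
    check_mains_libs("Java", None, [], [])           # rejected: libs missing
except ValueError as err:
    print(err)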
# See the License for the specific language governing permissions and # limitations under the License. import copy JOB_BINARY_SCHEMA = { "type": "object", "properties": { "name": { "type": "string", "minLength": 1, "maxLength": 50, "format": "valid_name" }, "description": { "type": "string" }, "url": { "type": "string", "format": "valid_job_location" }, "is_public": { "type": ["boolean", "null"], }, "is_protected": { "type": ["boolean", "null"], }, # extra is simple_config for now because we may need not only # user-password it the case of external storage "extra": { "type": "simple_config", } }, "additionalProperties": False, "required": [ "name", "url" ] } JOB_BINARY_UPDATE_SCHEMA = copy.copy(JOB_BINARY_SCHEMA) JOB_BINARY_UPDATE_SCHEMA["required"] = [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/edp/job_execution.py0000664000175000017500000001603600000000000024261 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import six import time from oslo_utils import timeutils from sahara import conductor as c from sahara import context from sahara import exceptions as ex from sahara.i18n import _ from sahara.plugins import base as plugin_base from sahara.service.edp import job_utils as j_u from sahara.service.validations import acl from sahara.service.validations import base as val_base import sahara.service.validations.edp.base as b import sahara.service.validations.edp.job_interface as j_i from sahara.utils import cluster as c_u from sahara.utils import edp conductor = c.API def _is_main_class_present(data): if data: val = data.get( 'job_configs', {}).get( 'configs', {}).get('edp.java.main_class', None) return val and isinstance(val, six.string_types) return False def check_main_class_present(data, job): if not _is_main_class_present(data): raise ex.InvalidDataException( _('%s job must specify edp.java.main_class') % job.type) def _is_topology_name_present(data): if data: val = data.get( 'job_configs', {}).get( 'configs', {}).get('topology_name', None) return val and isinstance(val, six.string_types) return False def check_topology_name_present(data, job): if not _is_main_class_present(data): raise ex.InvalidDataException( _('%s job must specify topology_name') % job.type) def _streaming_present(data): try: streaming = set(('edp.streaming.mapper', 'edp.streaming.reducer')) configs = set(data['job_configs']['configs']) return streaming.intersection(configs) == streaming except Exception: return False def check_streaming_present(data, job): if not _streaming_present(data): raise ex.InvalidDataException( _("%s job must specify streaming mapper and reducer") % job.type) def check_scheduled_job_execution_info(job_execution_info): start = job_execution_info.get('start', None) if start is None: raise ex.InvalidDataException(_( "Scheduled job must specify start time")) try: start = time.strptime(start, "%Y-%m-%d %H:%M:%S") start = 
timeutils.datetime.datetime.fromtimestamp(time.mktime(start)) except Exception: raise ex.InvalidDataException(_("Invalid Time Format")) now_time = timeutils.utcnow() if timeutils.delta_seconds(now_time, start) < 0: raise ex.InvalidJobExecutionInfoException(_( "Job start time should be later than now")) def check_job_execution(data, job_templates_id=None): ctx = context.ctx() job_execution_info = data.get('job_execution_info', {}) cluster = conductor.cluster_get(ctx, data['cluster_id']) if not cluster: raise ex.InvalidReferenceException( _("Cluster with id '%s' doesn't exist") % data['cluster_id']) val_base.check_plugin_labels(cluster.plugin_name, cluster.hadoop_version) jt_err_msg = _("Job template with id '%s' doesn't exist") if job_templates_id is None: job = conductor.job_get(ctx, data['job_template_id']) if not job: raise ex.InvalidReferenceException( jt_err_msg % data['job_template_id']) else: job = conductor.job_get(ctx, job_templates_id) if not job: raise ex.InvalidReferenceException( jt_err_msg % job_templates_id) plugin = plugin_base.PLUGINS.get_plugin(cluster.plugin_name) edp_engine = plugin.get_edp_engine(cluster, job.type) if not edp_engine: raise ex.InvalidReferenceException( _("Cluster with id '%(cluster_id)s' doesn't support job type " "'%(job_type)s'") % {"cluster_id": cluster.id, "job_type": job.type}) j_i.check_execution_interface(data, job) edp_engine.validate_job_execution(cluster, job, data) if 'job_execution_type' in job_execution_info: j_type = job_execution_info.get('job_execution_type', 'workflow') if j_type == 'scheduled': check_scheduled_job_execution_info(job_execution_info) def check_data_sources(data, job): if not ('input_id' in data and 'output_id' in data): raise ex.InvalidDataException(_("%s job requires 'input_id' " "and 'output_id'") % job.type) b.check_data_source_exists(data['input_id']) b.check_data_source_exists(data['output_id']) b.check_data_sources_are_different(data['input_id'], data['output_id']) def check_job_execution_cancel(job_id, **kwargs): ctx = context.current() je = conductor.job_execution_get(ctx, job_id) if je.tenant_id != ctx.tenant_id: raise ex.CancelingFailed( _("Job execution with id '%s' cannot be canceled " "because it wasn't created in this tenant") % job_id) if je.is_protected: raise ex.CancelingFailed( _("Job Execution with id '%s' cannot be canceled " "because it's marked as protected") % job_id) def check_job_execution_delete(job_id, **kwargs): ctx = context.current() je = conductor.job_execution_get(ctx, job_id) acl.check_tenant_for_delete(ctx, je) acl.check_protected_from_delete(je) def check_job_execution_update(job_id, data, **kwargs): ctx = context.current() je = conductor.job_execution_get(ctx, job_id) acl.check_tenant_for_update(ctx, je) acl.check_protected_from_update(je, data) def check_job_status_update(job_id, data): ctx = context.ctx() job_execution = conductor.job_execution_get(ctx, job_id) # check we are updating status if 'info' in data: if len(data) != 1: raise ex.InvalidJobStatus(_("Invalid status parameter")) cluster = conductor.cluster_get(ctx, job_execution.cluster_id) if cluster is None or cluster.status != c_u.CLUSTER_STATUS_ACTIVE: raise ex.InvalidDataException( _("Suspending operation can not be performed on an inactive or " "non-existent cluster")) job_templates_id = conductor.job_execution_get(ctx, job_id).job_id job_type = conductor.job_get(ctx, job_templates_id).type engine = j_u.get_plugin(cluster).get_edp_engine(cluster, job_type) if 'info' in data: if data['info']['status'] == 
edp.JOB_ACTION_SUSPEND: if not engine.does_engine_implement('suspend_job'): raise ex.InvalidReferenceException( _("Engine doesn't support suspending job feature")) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/edp/job_execution_schema.py0000664000175000017500000000411400000000000025573 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import sahara.service.validations.edp.base as b JOB_EXEC_SCHEMA = { "type": "object", "properties": { "input_id": { "type": "string", "format": "uuid", }, "output_id": { "type": "string", "format": "uuid", }, "cluster_id": { "type": "string", "format": "uuid", }, "interface": { "type": "simple_config", }, "job_configs": b.job_configs, "is_public": { "type": ["boolean", "null"], }, "is_protected": { "type": ["boolean", "null"], } }, "additionalProperties": False, "required": [ "cluster_id" ] } JOB_EXEC_SCHEMA_V2 = copy.deepcopy(JOB_EXEC_SCHEMA) JOB_EXEC_SCHEMA_V2['properties'].update({ "job_template_id": { "type": "string", "format": "uuid", }}) JOB_EXEC_SCHEMA_V2['required'].append('job_template_id') JOB_EXEC_UPDATE_SCHEMA = { "type": "object", "properties": { "is_public": { "type": ["boolean", "null"], }, "is_protected": { "type": ["boolean", "null"], }, "info": { "type": "simple_config", "properties": { "status": { "enum": ["suspend", "cancel"] } }, "additionalProperties": False } }, "additionalProperties": False, "required": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/edp/job_interface.py0000664000175000017500000001621500000000000024215 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
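# check_scheduled_job_execution_info() in job_execution.py above parses the
# requested start time and rejects values in the past. A stdlib-only sketch of
# the same idea, using datetime directly instead of oslo's timeutils:
import datetime

def check_scheduled_start(job_execution_info):
    start = job_execution_info.get('start')
    if start is None:
        raise ValueError("Scheduled job must specify start time")
    try:
        start_dt = datetime.datetime.strptime(start, "%Y-%m-%d %H:%M:%S")
    except ValueError:
        raise ValueError("Invalid Time Format")
    if start_dt <= datetime.datetime.utcnow():
        raise ValueError("Job start time should be later than now")
    return start_dt

# A start one hour in the future is accepted; the exact value is illustrative.
in_an_hour = (datetime.datetime.utcnow() +
              datetime.timedelta(hours=1)).strftime("%Y-%m-%d %H:%M:%S")
check_scheduled_start({"start": in_an_hour})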
from oslo_utils import uuidutils import six from six.moves.urllib import parse as urlparse import sahara.exceptions as e from sahara.i18n import _ from sahara.service.validations.edp import base as b from sahara.utils import edp DATA_TYPE_STRING = "string" DATA_TYPE_NUMBER = "number" DATA_TYPE_DATA_SOURCE = "data_source" DATA_TYPES = [DATA_TYPE_STRING, DATA_TYPE_NUMBER, DATA_TYPE_DATA_SOURCE] DEFAULT_DATA_TYPE = DATA_TYPE_STRING INTERFACE_ARGUMENT_SCHEMA = { "type": ["array", "null"], "uniqueItems": True, "items": { "type": "object", "properties": { "name": { "type": "string", "minLength": 1 }, "description": { "type": ["string", "null"] }, "mapping_type": { "type": "string", "enum": ["args", "configs", "params"] }, "location": { "type": "string", "minLength": 1 }, "value_type": { "type": "string", "enum": DATA_TYPES, "default": "string" }, "required": { "type": "boolean" }, "default": { "type": ["string", "null"] } }, "additionalProperties": False, "required": ["name", "mapping_type", "location", "required"] } } def _check_job_interface(data, interface): names = set(arg["name"] for arg in interface) if len(names) != len(interface): raise e.InvalidDataException( _("Name must be unique within the interface for any job.")) mapping_types = set(arg["mapping_type"] for arg in interface) acceptable_types = edp.JOB_TYPES_ACCEPTABLE_CONFIGS[data["type"]] if any(m_type not in acceptable_types for m_type in mapping_types): args = {"mapping_types": str(list(acceptable_types)), "job_type": data["type"]} raise e.InvalidDataException( _("Only mapping types %(mapping_types)s are allowed for job type " "%(job_type)s.") % args) positional_args = [arg for arg in interface if arg["mapping_type"] == "args"] if not all(six.text_type(arg["location"]).isnumeric() for arg in positional_args): raise e.InvalidDataException( _("Locations of positional arguments must be an unbroken integer " "sequence ascending from 0.")) locations = set(int(arg["location"]) for arg in positional_args) if not all(i in locations for i in range(len(locations))): raise e.InvalidDataException( _("Locations of positional arguments must be an unbroken integer " "sequence ascending from 0.")) not_required = (arg for arg in positional_args if not arg["required"]) if not all(arg.get("default", None) for arg in not_required): raise e.InvalidDataException( _("Positional arguments must be given default values if they are " "not required.")) mappings = ((arg["mapping_type"], arg["location"]) for arg in interface) if len(set(mappings)) != len(interface): raise e.InvalidDataException( _("The combination of mapping type and location must be unique " "within the interface for any job.")) for arg in interface: if "value_type" not in arg: arg["value_type"] = DEFAULT_DATA_TYPE default = arg.get("default", None) if default is not None: _validate_value(arg["value_type"], default) def check_job_interface(data, **kwargs): interface = data.get("interface", []) if interface: _check_job_interface(data, interface) def _validate_data_source(value): if uuidutils.is_uuid_like(value): b.check_data_source_exists(value) else: if not urlparse.urlparse(value).scheme: raise e.InvalidDataException( _("Data source value '%s' is neither a valid data source ID " "nor a valid URL.") % value) def _validate_number(value): if not six.text_type(value).isnumeric(): raise e.InvalidDataException( _("Value '%s' is not a valid number.") % value) def _validate_string(value): if not isinstance(value, six.string_types): raise e.InvalidDataException( _("Value '%s' is not a valid 
string.") % value) _value_type_validators = { DATA_TYPE_STRING: _validate_string, DATA_TYPE_NUMBER: _validate_number, DATA_TYPE_DATA_SOURCE: _validate_data_source } def _validate_value(type, value): _value_type_validators[type](value) def check_execution_interface(data, job): job_int = {arg.name: arg for arg in job.interface} execution_int = data.get("interface", None) if not (job_int or execution_int): return if job_int and execution_int is None: raise e.InvalidDataException( _("An interface was specified with the template for this job. " "Please pass an interface map with this job (even if empty).")) execution_names = set(execution_int.keys()) definition_names = set(job_int.keys()) not_found_names = execution_names - definition_names if not_found_names: raise e.InvalidDataException( _("Argument names: %s were not found in the interface for this " "job.") % str(list(not_found_names))) required_names = {arg.name for arg in job.interface if arg.required} unset_names = required_names - execution_names if unset_names: raise e.InvalidDataException(_("Argument names: %s are required for " "this job.") % str(list(unset_names))) nonexistent = object() for name, value in six.iteritems(execution_int): arg = job_int[name] _validate_value(arg.value_type, value) if arg.mapping_type == "args": continue typed_configs = data.get("job_configs", {}).get(arg.mapping_type, {}) config_value = typed_configs.get(arg.location, nonexistent) if config_value is not nonexistent and config_value != value: args = {"name": name, "mapping_type": arg.mapping_type, "location": arg.location} raise e.InvalidDataException( _("Argument '%(name)s' was passed both through the interface " "and in location '%(mapping_type)s'.'%(location)s'. Please " "pass this through either the interface or the " "configuration maps, not both.") % args) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/edp/job_schema.py0000664000175000017500000000441200000000000023511 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from sahara.service.validations.edp import job_interface as j_i from sahara.utils import edp JOB_SCHEMA = { "type": "object", "properties": { "name": { "type": "string", "minLength": 1, "maxLength": 50, "format": "valid_name" }, "description": { "type": "string" }, "type": { "type": "string", "enum": edp.JOB_TYPES_ALL, }, "mains": { "type": "array", "uniqueItems": True, "items": { "type": "string", "minLength": 1, } }, "libs": { "type": "array", "uniqueItems": True, "items": { "type": "string", "minLength": 1, } }, "streaming": { "type": "boolean" }, "interface": j_i.INTERFACE_ARGUMENT_SCHEMA, "is_public": { "type": ["boolean", "null"], }, "is_protected": { "type": ["boolean", "null"], } }, "additionalProperties": False, "required": [ "name", "type", ] } JOB_UPDATE_SCHEMA = { "type": "object", "properties": { "name": { "type": "string", "minLength": 1, "maxLength": 50, "format": "valid_name" }, "description": { "type": ["string", "null"] }, "is_public": { "type": ["boolean", "null"], }, "is_protected": { "type": ["boolean", "null"], } }, "additionalProperties": False, "required": [] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/images.py0000664000175000017500000000234300000000000022115 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. image_register_schema = { "type": "object", "properties": { "username": { "type": "string", }, "description": { "type": "string", }, }, "additionalProperties": False, "required": ["username"] } image_tags_schema = { "type": "object", "properties": { "tags": { "type": "array", "items": { "type": "string", "format": "valid_tag" }, }, }, "additionalProperties": False, "required": ["tags"] } def check_image_register(data, **kwargs): pass def check_tags(data, **kwargs): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/node_group_template_schema.py0000664000175000017500000001000300000000000026214 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import copy from sahara.service.validations import shares NODE_GROUP_TEMPLATE_SCHEMA = { "type": "object", "properties": { "name": { "type": "string", "minLength": 1, "maxLength": 50, "format": "valid_name_hostname", }, "flavor_id": { "type": "flavor", }, "plugin_name": { "type": "string", }, "hadoop_version": { "type": "string", }, "node_processes": { "type": "array", "items": { "type": "string", }, "minItems": 1 }, "image_id": { "type": ["string", "null"], "format": "uuid", }, "node_configs": { "type": ["configs", "null"], }, "volumes_per_node": { "type": "integer", "minimum": 0, }, "volumes_size": { "type": ["integer", "null"], "minimum": 1, }, "volume_type": { "type": ["string", "null"], }, "volumes_availability_zone": { "type": ["string", "null"], }, "volume_mount_prefix": { "type": ["string", "null"], "format": "posix_path", }, "description": { "type": ["string", "null"], }, "floating_ip_pool": { "type": ["string", "null"], }, "security_groups": { "type": ["array", "null"], "items": {"type": "string"} }, "auto_security_group": { "type": ["boolean", "null"], }, "availability_zone": { "type": ["string", "null"], }, "is_proxy_gateway": { "type": ["boolean", "null"], }, "volume_local_to_instance": { "type": ["boolean", "null"] }, "shares": copy.deepcopy(shares.SHARE_SCHEMA), "use_autoconfig": { "type": ["boolean", "null"] }, "is_public": { "type": ["boolean", "null"], }, "is_protected": { "type": ["boolean", "null"], } }, "additionalProperties": False, "required": [ "name", "flavor_id", "plugin_name", "hadoop_version", "node_processes", ] } # APIv2: renaming hadoop_version -> plugin_version NODE_GROUP_TEMPLATE_SCHEMA_V2 = copy.deepcopy(NODE_GROUP_TEMPLATE_SCHEMA) del NODE_GROUP_TEMPLATE_SCHEMA_V2["properties"]["hadoop_version"] NODE_GROUP_TEMPLATE_SCHEMA_V2["required"].remove("hadoop_version") NODE_GROUP_TEMPLATE_SCHEMA_V2["properties"].update({ "plugin_version": { "type": "string", }}) NODE_GROUP_TEMPLATE_SCHEMA_V2["required"].append("plugin_version") NODE_GROUP_TEMPLATE_SCHEMA_V2["properties"].update({ "boot_from_volume": { "type": "boolean", }, "boot_volume_type": { "type": "string", }, "boot_volume_availability_zone": { "type": "string", }, "boot_volume_local_to_instance": { "type": "boolean", }}) # For an update we do not require any fields but we want the given # fields to be validated NODE_GROUP_TEMPLATE_UPDATE_SCHEMA = copy.copy(NODE_GROUP_TEMPLATE_SCHEMA) NODE_GROUP_TEMPLATE_UPDATE_SCHEMA["required"] = [] NODE_GROUP_TEMPLATE_UPDATE_SCHEMA_V2 = copy.copy(NODE_GROUP_TEMPLATE_SCHEMA_V2) NODE_GROUP_TEMPLATE_UPDATE_SCHEMA_V2["required"] = [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/node_group_templates.py0000664000175000017500000000754000000000000025073 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
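# Note that the *_UPDATE_SCHEMA variants above are built with copy.copy rather
# than copy.deepcopy: only the top-level mapping is duplicated, which is safe
# here only because the derived schema rebinds the whole "required" key instead
# of mutating the shared list. A small sketch of why that distinction matters:
import copy

BASE = {"properties": {"name": {"type": "string"}}, "required": ["name"]}

UPDATE = copy.copy(BASE)           # shallow: nested objects are shared
UPDATE["required"] = []            # safe: rebinds the key, BASE is untouched
assert BASE["required"] == ["name"]

BROKEN = copy.copy(BASE)
BROKEN["required"].remove("name")  # not what the update schemas do ...
assert BASE["required"] == []      # ... the shared list (and BASE) changed too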
from sahara import exceptions as ex from sahara.i18n import _ from sahara.service.api import v10 as api import sahara.service.validations.base as b from sahara.service.validations import shares def check_node_group_template_create(data, **kwargs): plugin_version = 'hadoop_version' if data.get('plugin_version'): plugin_version = 'plugin_version' b.check_node_group_template_unique_name(data['name']) b.check_plugin_name_exists(data['plugin_name']) b.check_plugin_supports_version(data['plugin_name'], data[plugin_version]) b.check_node_group_basic_fields(data['plugin_name'], data[plugin_version], data) if data.get('image_id'): b.check_image_registered(data['image_id']) b.check_required_image_tags(data['plugin_name'], data[plugin_version], data['image_id']) if data.get('shares'): shares.check_shares(data['shares']) def check_node_group_template_usage(node_group_template_id, **kwargs): cluster_users = [] template_users = [] for cluster in api.get_clusters(): if (node_group_template_id in [node_group.node_group_template_id for node_group in cluster.node_groups]): cluster_users += [cluster.name] for cluster_template in api.get_cluster_templates(): if (node_group_template_id in [node_group.node_group_template_id for node_group in cluster_template.node_groups]): template_users += [cluster_template.name] if cluster_users or template_users: raise ex.InvalidReferenceException( _("Node group template %(template)s is in use by " "cluster templates: %(users)s; and clusters: %(clusters)s") % {'template': node_group_template_id, 'users': template_users and ', '.join(template_users) or 'N/A', 'clusters': cluster_users and ', '.join(cluster_users) or 'N/A'}) def check_node_group_template_update(node_group_template_id, data, **kwargs): plugin_version = 'hadoop_version' if data.get('plugin_version'): plugin_version = 'plugin_version' if data.get('plugin_name') and not data.get(plugin_version): raise ex.InvalidReferenceException( _("You must specify a %s value " "for your plugin_name") % plugin_version) if data.get('plugin_name'): plugin = data.get('plugin_name') version = data.get(plugin_version) b.check_plugin_name_exists(plugin) b.check_plugin_supports_version(plugin, version) else: ngt = api.get_node_group_template(node_group_template_id) plugin = ngt.plugin_name if data.get(plugin_version): version = data.get(plugin_version) b.check_plugin_supports_version(plugin, version) else: version = ngt.hadoop_version if data.get('image_id'): b.check_image_registered(data['image_id']) b.check_required_image_tags(plugin, version, data['image_id']) b.check_node_group_basic_fields(plugin, version, data) if data.get('shares'): shares.check_shares(data['shares']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/plugins.py0000664000175000017500000000214700000000000022333 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
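# Several create/update validators above (node group templates, cluster
# templates, clusters) first decide whether the request uses the v1
# hadoop_version key or the v2 plugin_version key and then read the version
# through that name. A tiny standalone sketch of that selection:
def get_requested_version(data):
    version_key = 'hadoop_version'
    if data.get('plugin_version'):
        version_key = 'plugin_version'
    return version_key, data.get(version_key)

# v1-style and v2-style bodies resolve to the same version value.
assert get_requested_version(
    {'plugin_name': 'vanilla', 'hadoop_version': '2.7.1'}
) == ('hadoop_version', '2.7.1')
assert get_requested_version(
    {'plugin_name': 'vanilla', 'plugin_version': '2.7.1'}
) == ('plugin_version', '2.7.1')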
import sahara.exceptions as ex from sahara.i18n import _ from sahara.plugins import base def plugin_update_validation_jsonschema(): return base.PLUGINS.get_plugin_update_validation_jsonschema() def check_convert_to_template(plugin_name, version, **kwargs): raise ex.InvalidReferenceException( _("Requested plugin '%s' doesn't support converting config files " "to cluster templates") % plugin_name) def check_plugin_update(plugin_name, data, **kwargs): base.PLUGINS.validate_plugin_update(plugin_name, data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/validations/shares.py0000664000175000017500000000461200000000000022136 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara import exceptions as ex from sahara.i18n import _ from sahara.service.edp.utils import shares from sahara.utils.openstack import manila SHARE_SCHEMA = { "type": "array", "items": { "type": "object", "properties": { "id": { "type": "string", "format": "uuid" }, "path": { "type": ["string", "null"] }, "access_level": { "type": ["string", "null"], "enum": ["rw", "ro"], "default": "rw" } }, "additionalProperties": False, "required": [ "id" ] } } def check_shares(data): if not data: return paths = (share.get('path') for share in data) paths = [path for path in paths if path is not None] if len(paths) != len(set(paths)): raise ex.InvalidDataException( _('Multiple shares cannot be mounted to the same path.')) for path in paths: if not path.startswith('/') or '\x00' in path: raise ex.InvalidDataException( _('Paths must be absolute Linux paths starting with "/" ' 'and may not contain nulls.')) client = manila.client() for share in data: manila_share = manila.get_share(client, share['id']) if not manila_share: raise ex.InvalidReferenceException( _("Requested share id %s does not exist.") % share['id']) share_type = manila_share.share_proto if share_type not in shares.SUPPORTED_SHARE_TYPES: raise ex.InvalidReferenceException( _("Requested share id %(id)s is of type %(type)s, which is " "not supported by Sahara.") % {"id": share['id'], "type": share_type}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/service/volumes.py0000664000175000017500000002256000000000000020030 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
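# check_shares() above validates manila share mounts in two stages; the first
# stage is purely local: mount paths must be unique, absolute and free of null
# bytes. A standalone sketch of that local part, without the manila API calls:
def check_share_paths(share_list):
    paths = [s.get('path') for s in share_list if s.get('path') is not None]
    if len(paths) != len(set(paths)):
        raise ValueError("Multiple shares cannot be mounted to the same path.")
    for path in paths:
        if not path.startswith('/') or '\x00' in path:
            raise ValueError('Paths must be absolute Linux paths starting '
                             'with "/" and may not contain nulls.')

check_share_paths([{"id": "share-1", "path": "/mnt/data"},
                   {"id": "share-2"}])                   # ok: default path used
try:
    check_share_paths([{"id": "share-1", "path": "/mnt/x"},
                       {"id": "share-2", "path": "/mnt/x"}])
except ValueError as err:
    print(err)                                           # duplicate mount path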
import re import threading from oslo_config import cfg from oslo_log import log as logging from sahara import conductor as c from sahara import context from sahara.i18n import _ from sahara.plugins import provisioning as plugin_base from sahara.utils import cluster_progress_ops as cpo from sahara.utils.openstack import base as b from sahara.utils.openstack import cinder from sahara.utils.openstack import nova from sahara.utils import poll_utils conductor = c.API LOG = logging.getLogger(__name__) CONF = cfg.CONF def _get_timeout_for_disk_preparing(cluster): configs = cluster.cluster_configs.to_dict() option_name = plugin_base.DISKS_PREPARING_TIMEOUT.name option_target = plugin_base.DISKS_PREPARING_TIMEOUT.applicable_target try: return int(configs[option_target][option_name]) except Exception: return int(plugin_base.DISKS_PREPARING_TIMEOUT.default_value) def _is_xfs_enabled(cluster): configs = cluster.cluster_configs.to_dict() option_name = plugin_base.XFS_ENABLED.name option_target = plugin_base.XFS_ENABLED.applicable_target try: return bool(configs[option_target][option_name]) except Exception: return bool(plugin_base.XFS_ENABLED.default_value) def _get_os_distrib(remote): return remote.get_os_distrib() def _check_installed_xfs(instance): redhat = "rpm -q xfsprogs || yum install -y xfsprogs" debian = "dpkg -s xfsprogs || apt-get -y install xfsprogs" cmd_map = { "centos": redhat, "fedora": redhat, "redhatenterpriseserver": redhat, "redhat": redhat, "ubuntu": debian, 'debian': debian } with instance.remote() as r: distro = _get_os_distrib(r) if not cmd_map.get(distro): LOG.warning("Cannot verify installation of XFS tools for " "unknown distro {distro}.".format(distro=distro)) return False try: r.execute_command(cmd_map.get(distro), run_as_root=True) return True except Exception as e: LOG.warning("Cannot install xfsprogs: {reason}".format(reason=e)) return False def _can_use_xfs(instances): cluster = instances[0].cluster if not _is_xfs_enabled(cluster): return False for instance in instances: if not _check_installed_xfs(instance): return False return True def mount_to_instances(instances): if len(instances) == 0: return use_xfs = _can_use_xfs(instances) for instance in instances: with context.set_current_instance_id(instance.instance_id): devices = _find_instance_devices(instance) if devices: cpo.add_provisioning_step( instance.cluster_id, _("Mount volumes to {inst_name} instance").format( inst_name=instance.instance_name), len(devices)) formatted_devices = [] lock = threading.Lock() with context.ThreadGroup() as tg: # Since formating can take several minutes (for large # disks) and can be done in parallel, launch one thread # per disk. 
for device in devices: tg.spawn('format-device-%s' % device, _format_device, instance, device, use_xfs, formatted_devices, lock) conductor.instance_update( context.current(), instance, {"storage_devices_number": len(formatted_devices)}) for idx, dev in enumerate(formatted_devices): _mount_volume_to_node(instance, idx+1, dev, use_xfs) def _find_instance_devices(instance): with instance.remote() as r: code, attached_info = r.execute_command( "lsblk -r | awk '$6 ~ /disk/ || /part/ {print \"/dev/\" $1}'") attached_dev = attached_info.split() code, mounted_info = r.execute_command( "mount | awk '$1 ~ /^\/dev/ {print $1}'") mounted_dev = mounted_info.split() # find and ignore Nova config drive for label in ("config-2", "CONFIG-2"): code, nova_config_drive = r.execute_command( "/sbin/blkid -t LABEL=\"%s\" -odevice" % label, raise_when_error=False, run_as_root=True ) drive_name = nova_config_drive.strip() if code == 0 and drive_name in attached_dev: attached_dev.remove(drive_name) break # filtering attached devices, that should not be mounted for dev in attached_dev[:]: idx = re.sub("\D", "", dev) if idx: if dev in mounted_dev: if re.sub("\d", "", dev) in attached_dev: attached_dev.remove(re.sub("\d", "", dev)) attached_dev.remove(dev) for dev in attached_dev[:]: if re.sub("\D", "", dev): if re.sub("\d", "", dev) in attached_dev: attached_dev.remove(dev) attached_dev = [dev for dev in attached_dev if dev not in mounted_dev] return attached_dev @cpo.event_wrapper(mark_successful_on_exit=True) def _mount_volume_to_node(instance, index, device, use_xfs): LOG.debug("Mounting volume {device} to instance".format(device=device)) mount_point = instance.node_group.volume_mount_prefix + str(index) _mount_volume(instance, device, mount_point, use_xfs) LOG.debug("Mounted volume to instance") def _format_device( instance, device, use_xfs, formatted_devices=None, lock=None): with instance.remote() as r: try: timeout = _get_timeout_for_disk_preparing(instance.cluster) # Format devices with better performance options: # - reduce number of blocks reserved for root to 1% # - use 'dir_index' for faster directory listings # - use 'extents' to work faster with large files # - disable journaling fs_opts = '-F -m 1 -O dir_index,extents,^has_journal' command = 'sudo mkfs.ext4 %s %s' % (fs_opts, device) if use_xfs: command = 'sudo mkfs.xfs -f %s' % device r.execute_command(command, timeout=timeout) if lock: with lock: formatted_devices.append(device) except Exception as e: LOG.warning("Device {dev} cannot be formatted: {reason}".format( dev=device, reason=e)) cpo.add_fail_event(instance, e) def _mount_volume(instance, device_path, mount_point, use_xfs): with instance.remote() as r: try: timeout = _get_timeout_for_disk_preparing(instance.cluster) # Mount volumes with better performance options: # - enable write-back for ext4 # - do not store access time # - disable barrier for xfs r.execute_command('sudo mkdir -p %s' % mount_point) mount_opts = '-o data=writeback,noatime,nodiratime' if use_xfs: mount_opts = "-t xfs -o noatime,nodiratime,nobarrier" r.execute_command('sudo mount %s %s %s' % (mount_opts, device_path, mount_point), timeout=timeout) r.execute_command( 'sudo sh -c "grep %s /etc/mtab >> /etc/fstab"' % device_path) except Exception: LOG.error("Error mounting volume to instance") raise def detach_from_instance(instance): for volume_id in instance.volumes: _detach_volume(instance, volume_id) _delete_volume(volume_id) @poll_utils.poll_status( 'detach_volume_timeout', _("Await for volume become detached"), sleep=2) 
def _await_detach(volume_id): volume = cinder.get_volume(volume_id) if volume.status not in ['available', 'error']: return False return True def _detach_volume(instance, volume_id): volume = cinder.get_volume(volume_id) try: LOG.debug("Detaching volume {id} from instance".format(id=volume_id)) b.execute_with_retries(nova.client().volumes.delete_server_volume, instance.instance_id, volume_id) except Exception: LOG.error("Can't detach volume {id}".format(id=volume.id)) detach_timeout = CONF.timeouts.detach_volume_timeout LOG.debug("Waiting {timeout} seconds to detach {id} volume".format( timeout=detach_timeout, id=volume_id)) _await_detach(volume_id) def _delete_volume(volume_id): LOG.debug("Deleting volume {volume}".format(volume=volume_id)) volume = cinder.get_volume(volume_id) try: b.execute_with_retries(volume.delete) except Exception: LOG.error("Can't delete volume {volume}".format(volume=volume.id)) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.749891 sahara-16.0.0/sahara/swift/0000775000175000017500000000000000000000000015453 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/swift/__init__.py0000664000175000017500000000000000000000000017552 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.749891 sahara-16.0.0/sahara/swift/resources/0000775000175000017500000000000000000000000017465 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/swift/resources/conf-template.xml0000664000175000017500000000447000000000000022752 0ustar00zuulzuul00000000000000 fs.swift.service.sahara.auth.url fs.swift.service.sahara.tenant fs.swift.service.sahara.username fs.swift.service.sahara.password fs.swift.service.sahara.http.port 8080 fs.swift.service.sahara.https.port 443 fs.swift.service.sahara.public true fs.swift.service.sahara.auth.endpoint.prefix /endpoints/AUTH_ fs.swift.service.sahara.region fs.swift.service.sahara.apikey fs.swift.impl org.apache.hadoop.fs.swift.snative.SwiftNativeFileSystem fs.swift.connect.timeout fs.swift.socket.timeout fs.swift.connect.retry.count fs.swift.connect.throttle.delay fs.swift.blocksize fs.swift.partsize fs.swift.requestsize ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/swift/swift_helper.py0000664000175000017500000001071600000000000020525 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg from oslo_log import log as logging from sahara import context from sahara.plugins import swift_utils as su from sahara.utils import xmlutils as x LOG = logging.getLogger(__name__) CONF = cfg.CONF HADOOP_SWIFT_AUTH_URL = 'fs.swift.service.sahara.auth.url' HADOOP_SWIFT_TENANT = 'fs.swift.service.sahara.tenant' HADOOP_SWIFT_USERNAME = 'fs.swift.service.sahara.username' HADOOP_SWIFT_PASSWORD = 'fs.swift.service.sahara.password' HADOOP_SWIFT_REGION = 'fs.swift.service.sahara.region' HADOOP_SWIFT_TRUST_ID = 'fs.swift.service.sahara.trust.id' HADOOP_SWIFT_DOMAIN_NAME = 'fs.swift.service.sahara.domain.name' opts = [ cfg.StrOpt("public_identity_ca_file", help=("Location of ca certificate file to use for identity " "client requests via public endpoint")), cfg.StrOpt("public_object_store_ca_file", help=("Location of ca certificate file to use for object-store " "client requests via public endpoint")) ] public_endpoint_cert_group = cfg.OptGroup( name="object_store_access", title="Auth options for Swift access from VM") CONF.register_group(public_endpoint_cert_group) CONF.register_opts(opts, group=public_endpoint_cert_group) def retrieve_tenant(): return context.current().tenant_name def get_swift_configs(): configs = x.load_hadoop_xml_defaults('swift/resources/conf-template.xml') for conf in configs: if conf['name'] == HADOOP_SWIFT_AUTH_URL: conf['value'] = su.retrieve_auth_url() + "auth/tokens/" if conf['name'] == HADOOP_SWIFT_TENANT: conf['value'] = retrieve_tenant() if CONF.os_region_name and conf['name'] == HADOOP_SWIFT_REGION: conf['value'] = CONF.os_region_name if conf['name'] == HADOOP_SWIFT_DOMAIN_NAME: # NOTE(jfreud): Don't be deceived here... Even though there is an # attribute provided by context called domain_name, it is used for # domain scope, and hadoop-swiftfs always authenticates using # project scope. The purpose of the setting below is to override # the default value for project domain and user domain, domain id # as 'default', which may not always be correct. # TODO(jfreud): When hadoop-swiftfs allows it, stop hoping that # project_domain_name is always equal to user_domain_name. 
conf['value'] = context.current().project_domain_name result = [cfg for cfg in configs if cfg['value']] LOG.info("Swift would be integrated with the following " "params: {result}".format(result=result)) return result def read_default_swift_configs(): return x.load_hadoop_xml_defaults('swift/resources/conf-template.xml') def install_ssl_certs(instances): certs = [] if CONF.object_store_access.public_identity_ca_file: certs.append(CONF.object_store_access.public_identity_ca_file) if CONF.object_store_access.public_object_store_ca_file: certs.append(CONF.object_store_access.public_object_store_ca_file) if not certs: return with context.ThreadGroup() as tg: for inst in instances: tg.spawn("configure-ssl-cert-%s" % inst.instance_id, _install_ssl_certs, inst, certs) def _install_ssl_certs(instance, certs): register_cmd = ( "sudo su - -c \"keytool -import -alias sahara-%d -keystore " "`cut -f2 -d \\\"=\\\" /etc/profile.d/99-java.sh | head -1`" "/lib/security/cacerts -file /tmp/cert.pem -noprompt -storepass " "changeit\"") with instance.remote() as r: for idx, cert in enumerate(certs): with open(cert) as cert_fd: data = cert_fd.read() r.write_file_to("/tmp/cert.pem", data) try: r.execute_command(register_cmd % idx) finally: r.execute_command("rm /tmp/cert.pem") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/swift/utils.py0000664000175000017500000000267000000000000017172 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg import six from six.moves.urllib import parse as urlparse from sahara.utils.openstack import base as clients_base CONF = cfg.CONF SWIFT_INTERNAL_PREFIX = "swift://" SWIFT_URL_SUFFIX_START = '.' SWIFT_URL_SUFFIX = SWIFT_URL_SUFFIX_START + 'sahara' def retrieve_auth_url(endpoint_type="publicURL"): """This function returns auth url v3 api. """ version_suffix = 'v3' # return auth url with trailing slash return clients_base.retrieve_auth_url( endpoint_type=endpoint_type, version=version_suffix) + "/" def inject_swift_url_suffix(url): if isinstance(url, six.string_types) and url.startswith("swift://"): u = urlparse.urlparse(url) if not u.netloc.endswith(SWIFT_URL_SUFFIX): return url.replace(u.netloc, u.netloc + SWIFT_URL_SUFFIX, 1) return url ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.749891 sahara-16.0.0/sahara/tests/0000775000175000017500000000000000000000000015461 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/README.rst0000664000175000017500000000034000000000000017145 0ustar00zuulzuul00000000000000===================== Sahara Testing Infra ===================== This README file attempts to provide current and prospective contributors with everything they need to know in order to start creating unit tests for Sahara. 
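Stepping back to the Swift URL helper in sahara/swift/utils.py above: a minimal sketch of what inject_swift_url_suffix() does to a data-source URL. The container and object names are made up for illustration only.

from sahara.swift import utils as su

# Hypothetical container/object names, for illustration only.
print(su.inject_swift_url_suffix("swift://demo-container/input.txt"))
# -> swift://demo-container.sahara/input.txt

# A container that already ends with '.sahara' is left unchanged.
print(su.inject_swift_url_suffix("swift://demo-container.sahara/input.txt"))
# -> swift://demo-container.sahara/input.txt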
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/__init__.py0000664000175000017500000000117400000000000017575 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara.utils import patches patches.patch_all() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.749891 sahara-16.0.0/sahara/tests/unit/0000775000175000017500000000000000000000000016440 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/__init__.py0000664000175000017500000000000000000000000020537 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.749891 sahara-16.0.0/sahara/tests/unit/api/0000775000175000017500000000000000000000000017211 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/api/__init__.py0000664000175000017500000000000000000000000021310 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.749891 sahara-16.0.0/sahara/tests/unit/api/middleware/0000775000175000017500000000000000000000000021326 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/api/middleware/__init__.py0000664000175000017500000000000000000000000023425 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/api/middleware/test_auth_valid.py0000664000175000017500000000677300000000000025074 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import webob.dec from sahara.api.middleware import auth_valid from sahara.tests.unit import base as test_base class AuthValidatorTest(test_base.SaharaTestCase): tid1 = '8f9f0c8c4c634d6280e785b44a10b8ab' tid2 = '431c8ab1f14f4607bdfc17e05b3924d1' @staticmethod @webob.dec.wsgify def application(req): return "Banana" def setUp(self): super(AuthValidatorTest, self).setUp() self.app = auth_valid.AuthValidator(self.application) def test_auth_ok_project_id_in_url(self): req = webob.Request.blank("/v1.1/%s/clusters" % self.tid1, accept="text/plain", method="GET", environ={"HTTP_X_TENANT_ID": self.tid1}) res = req.get_response(self.app) self.assertEqual(200, res.status_code) def test_auth_ok_no_project_id_in_url(self): req = webob.Request.blank("/v1.1/clusters", accept="text/plain", method="GET", environ={"HTTP_X_TENANT_ID": self.tid1}) res = req.get_response(self.app) self.assertEqual(200, res.status_code) def test_auth_ok_without_path(self): req = webob.Request.blank("/", accept="text/plain", method="GET", environ={"HTTP_X_TENANT_ID": self.tid1}) res = req.get_response(self.app) self.assertEqual(200, res.status_code) def test_auth_without_environ(self): req = webob.Request.blank("/v1.1/%s/clusters" % self.tid1, accept="text/plain", method="GET") res = req.get_response(self.app) self.assertEqual(503, res.status_code) def test_auth_with_wrong_url(self): req = webob.Request.blank("/v1.1", accept="text/plain", method="GET", environ={"HTTP_X_TENANT_ID": self.tid1}) res = req.get_response(self.app) self.assertEqual(404, res.status_code) def test_auth_different_tenant(self): req = webob.Request.blank("/v1.1/%s/clusters" % self.tid1, accept="text/plain", method="GET", environ={"HTTP_X_TENANT_ID": self.tid2}) res = req.get_response(self.app) self.assertEqual(401, res.status_code) def test_auth_tenant_id_in_url_v2(self): # NOTE(jfreud): we expect AuthValidator to let this case pass through # although Flask will reject it with a 404 further down the pipeline req = webob.Request.blank("/v2/%s/clusters" % self.tid1, accept="text/plain", method="GET", environ={"HTTP_X_TENANT_ID": self.tid1}) res = req.get_response(self.app) self.assertEqual(200, res.status_code) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/api/test_acl.py0000664000175000017500000000322700000000000021365 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from unittest import mock from oslo_policy import policy as cpolicy from sahara.api import acl from sahara import exceptions as ex from sahara.tests.unit import base class TestAcl(base.SaharaTestCase): def _set_policy(self, json): acl.setup_policy() rules = cpolicy.Rules.load_json(json) acl.ENFORCER.set_rules(rules, use_conf=False) @mock.patch('oslo_config.cfg.ConfigOpts.find_file') def test_policy_allow(self, mock_config): mock_config.return_value = '/etc/sahara/' @acl.enforce("data-processing:clusters:get_all") def test(): pass json = '{"data-processing:clusters:get_all": ""}' self._set_policy(json) test() @mock.patch('oslo_config.cfg.ConfigOpts.find_file') def test_policy_deny(self, mock_config): mock_config.return_value = '/etc/sahara/' @acl.enforce("data-processing:clusters:get_all") def test(): pass json = '{"data-processing:clusters:get_all": "!"}' self._set_policy(json) self.assertRaises(ex.Forbidden, test) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/base.py0000664000175000017500000000443300000000000017730 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from oslotest import base from sahara import context from sahara.db import api as db_api from sahara import main from sahara.utils import rpc class SaharaTestCase(base.BaseTestCase): def setUp(self): super(SaharaTestCase, self).setUp() self.setup_context() rpc.setup('all-in-one') def setup_context(self, username="test_user", tenant_id="tenant_1", auth_token="test_auth_token", tenant_name='test_tenant', service_catalog=None, **kwargs): self.addCleanup(context.set_ctx, context.ctx() if context.has_ctx() else None) context.set_ctx(context.Context( username=username, tenant_id=tenant_id, auth_token=auth_token, service_catalog=service_catalog or {}, tenant_name=tenant_name, **kwargs)) def override_config(self, name, override, group=None): main.CONF.set_override(name, override, group) self.addCleanup(main.CONF.clear_override, name, group) class SaharaWithDbTestCase(SaharaTestCase): def setUp(self): super(SaharaWithDbTestCase, self).setUp() self.override_config('connection', "sqlite://", group='database') db_api.setup_db() self.addCleanup(db_api.drop_db) class _ConsecutiveThreadGroup(context.ThreadGroup): def __init__(self, _thread_pool_size=1000): pass def spawn(self, thread_description, func, *args, **kwargs): func(*args, **kwargs) def __enter__(self): return self def __exit__(self, *ex): pass def mock_thread_group(func): return mock.patch('sahara.context.ThreadGroup', new=_ConsecutiveThreadGroup)(func) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.749891 sahara-16.0.0/sahara/tests/unit/cli/0000775000175000017500000000000000000000000017207 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 
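A minimal sketch of how a unit test can build on the SaharaTestCase helpers above; the test class name and the overridden option are illustrative, not prescribed by the project.

from sahara.tests.unit import base


class ExampleTest(base.SaharaTestCase):
    @base.mock_thread_group
    def test_with_overrides(self):
        # override_config registers a cleanup, so the override is
        # reverted automatically when the test finishes.
        self.override_config("detach_volume_timeout", 10, group="timeouts")
        # With mock_thread_group applied, ThreadGroup.spawn runs the
        # target function inline, keeping assertions deterministic.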
sahara-16.0.0/sahara/tests/unit/cli/__init__.py0000664000175000017500000000000000000000000021306 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.753891 sahara-16.0.0/sahara/tests/unit/cli/image_pack/0000775000175000017500000000000000000000000021267 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/cli/image_pack/__init__.py0000664000175000017500000000000000000000000023366 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/cli/image_pack/test_image_pack_api.py0000664000175000017500000000551000000000000025612 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock import sys guestfs = mock.Mock() sys.modules['guestfs'] = guestfs from sahara.cli.image_pack import api from sahara.tests.unit import base class TestSaharaImagePackAPI(base.SaharaTestCase): def setUp(self): super(TestSaharaImagePackAPI, self).setUp() def tearDown(self): super(TestSaharaImagePackAPI, self).tearDown() @mock.patch('sahara.cli.image_pack.api.guestfs') @mock.patch('sahara.cli.image_pack.api.plugins_base') @mock.patch('sahara.cli.image_pack.api.LOG') def test_pack_image_call(self, mock_log, mock_plugins_base, mock_guestfs): guest = mock.Mock() mock_guestfs.GuestFS = mock.Mock(return_value=guest) guest.inspect_os = mock.Mock(return_value=['/dev/something1']) plugin = mock.Mock() mock_plugins_base.PLUGINS = mock.Mock( get_plugin=mock.Mock(return_value=plugin)) api.pack_image( "image_path", "plugin_name", "plugin_version", {"anarg": "avalue"}, root_drive=None, test_only=False) guest.add_drive_opts.assert_called_with("image_path", format="qcow2") guest.set_network.assert_called_with(True) guest.launch.assert_called_once_with() guest.mount.assert_called_with('/dev/something1', '/') guest.sh.assert_called_with("echo Testing sudo without tty...") guest.sync.assert_called_once_with() guest.umount_all.assert_called_once_with() guest.close.assert_called_once_with() @mock.patch('sahara.cli.image_pack.api.plugins_base') def test_get_plugin_arguments(self, mock_plugins_base): api.setup_plugins() mock_plugins_base.setup_plugins.assert_called_once_with() mock_PLUGINS = mock.Mock() mock_plugins_base.PLUGINS = mock_PLUGINS mock_plugin = mock.Mock() mock_plugin.get_versions = mock.Mock(return_value=['1']) mock_plugin.get_image_arguments = mock.Mock( return_value=["Argument!"]) mock_PLUGINS.get_plugin = mock.Mock(return_value=mock_plugin) result = api.get_plugin_arguments('Plugin!') mock_plugin.get_versions.assert_called_once_with() mock_plugin.get_image_arguments.assert_called_once_with('1') self.assertEqual(result, {'1': ['Argument!']}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 
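For reference, a hedged sketch of driving the image-pack API exercised by the test above. The image path, plugin name, version, and argument dict are placeholders, and a working libguestfs installation is assumed; only the call shapes shown in the test are relied on.

from sahara.cli.image_pack import api

# Register the available plugins, as test_get_plugin_arguments does.
api.setup_plugins()
# Positional arguments mirror the call in test_pack_image_call above;
# all concrete values here are hypothetical.
api.pack_image("ubuntu.qcow2", "some_plugin", "some_version",
               {"anarg": "avalue"}, root_drive=None, test_only=False)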
sahara-16.0.0/sahara/tests/unit/cli/test_sahara_cli.py0000664000175000017500000000575100000000000022716 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from sahara.cli import sahara_all from sahara.cli import sahara_api from sahara.cli import sahara_engine from sahara.tests.unit import base class TestSaharaCLI(base.SaharaTestCase): def setUp(self): super(TestSaharaCLI, self).setUp() modules = [ 'sahara.main.setup_common', 'oslo_service.wsgi.Server.__init__', 'oslo_service.wsgi.Loader' ] self.patchers = [] for module in modules: patch = mock.patch(module) patch.start() self.patchers.append(patch) mock_get_pl_patch = mock.patch('sahara.main.get_process_launcher') self.patchers.append(mock_get_pl_patch) self.mock_get_pl = mock_get_pl_patch.start() mock_start_server_patch = mock.patch( 'sahara.main.SaharaWSGIService.start') self.patchers.append(mock_start_server_patch) self.mock_start_server = mock_start_server_patch.start() def tearDown(self): super(TestSaharaCLI, self).tearDown() for patcher in reversed(self.patchers): patcher.stop() @mock.patch('oslo_config.cfg.ConfigOpts.find_file') @mock.patch('sahara.main.setup_sahara_api') def test_main_start_api(self, mock_setup_sahara_api, mock_config): mock_config.return_value = '/etc/sahara/' sahara_api.main() self.mock_start_server.assert_called_once_with() self.mock_get_pl.return_value.wait.assert_called_once_with() @mock.patch('sahara.utils.rpc.RPCServer.get_service') @mock.patch('oslo_service.service.ProcessLauncher') @mock.patch('sahara.main._get_ops_driver') @mock.patch('sahara.service.ops.OpsServer') def test_main_start_engine(self, mock_ops_server, mock_get_ops_driver, mock_pl, mock_get_service): self.mock_get_pl.return_value = mock_pl mock_ops_server.return_value.get_service.return_value = ( mock_get_service) sahara_engine.main() mock_pl.launch_service.assert_called_once_with(mock_get_service) mock_pl.wait.assert_called_once_with() @mock.patch('oslo_config.cfg.ConfigOpts.find_file') def test_main_start_all(self, mock_config): mock_config.return_value = '/etc/sahara/' sahara_all.main() self.mock_start_server.assert_called_once_with() self.mock_get_pl.return_value.wait.assert_called_once_with() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/cli/test_sahara_status.py0000664000175000017500000000265500000000000023472 0ustar00zuulzuul00000000000000# Copyright (c) 2018 NEC, Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslo_upgradecheck.upgradecheck import Code from sahara.cli import sahara_status from sahara.tests.unit import base class TestUpgradeChecks(base.SaharaTestCase): def setUp(self): super(TestUpgradeChecks, self).setUp() self.cmd = sahara_status.Checks() @mock.patch('oslo_config.cfg.ConfigOpts.find_file') @mock.patch('oslo_utils.fileutils.is_json') def test_checks(self, mock_util, mock_config): mock_config.return_value = '/etc/sahara/' mock_util.return_value = False for name, func in self.cmd._upgrade_checks: if isinstance(func, tuple): func_name, kwargs = func result = func_name(self, **kwargs) else: result = func(self) self.assertEqual(Code.SUCCESS, result.code) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.753891 sahara-16.0.0/sahara/tests/unit/conductor/0000775000175000017500000000000000000000000020440 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/conductor/__init__.py0000664000175000017500000000000000000000000022537 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/conductor/base.py0000664000175000017500000000344100000000000021726 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from sahara.conductor import manager from sahara.tests.unit import base class ConductorManagerTestCase(base.SaharaWithDbTestCase): def __init__(self, *args, **kwargs): """List of check callables could be specified. All return values from callables will be stored in setUp and checked in tearDown. 
""" self._checks = kwargs.pop("checks", []) super(ConductorManagerTestCase, self).__init__(*args, **kwargs) def setUp(self): super(ConductorManagerTestCase, self).setUp() self.api = manager.ConductorManager() self._results = [] for check in self._checks: self._results.append(copy.deepcopy(check())) def tearDown(self): for idx, check in enumerate(self._checks): check_val = check() self.assertEqual(self._results[idx], check_val, message="Check '%s' failed" % idx) super(ConductorManagerTestCase, self).tearDown() def assert_protected_resource_exception(self, ex): self.assertIn("marked as protected", str(ex)) def assert_created_in_another_tenant_exception(self, ex): self.assertIn("wasn't created in this tenant", str(ex)) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.753891 sahara-16.0.0/sahara/tests/unit/conductor/manager/0000775000175000017500000000000000000000000022052 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/conductor/manager/__init__.py0000664000175000017500000000000000000000000024151 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/conductor/manager/test_clusters.py0000664000175000017500000003502400000000000025333 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import copy from unittest import mock from sqlalchemy import exc as sa_exc import testtools from sahara.conductor import manager from sahara import context from sahara.db.sqlalchemy import models as m from sahara import exceptions as ex import sahara.tests.unit.conductor.base as test_base from sahara.utils import cluster as c_u SAMPLE_CLUSTER = { "plugin_name": "test_plugin", "hadoop_version": "test_version", "tenant_id": "tenant_1", "name": "test_cluster", "user_keypair_id": "my_keypair", "node_groups": [ { "name": "ng_1", "flavor_id": "42", "node_processes": ["p1", "p2"], "count": 1, "security_groups": None, 'use_autoconfig': True, "shares": None }, { "name": "ng_2", "flavor_id": "42", "node_processes": ["p3", "p4"], "count": 3, "security_groups": ["group1", "group2"], 'use_autoconfig': True, "shares": None } ], "cluster_configs": { "service_1": { "config_2": "value_2" }, "service_2": { "config_1": "value_1" } }, "shares": [], "is_public": False, "is_protected": False } class ClusterTest(test_base.ConductorManagerTestCase): def __init__(self, *args, **kwargs): super(ClusterTest, self).__init__( checks=[ lambda: SAMPLE_CLUSTER, lambda: manager.CLUSTER_DEFAULTS, lambda: manager.NODE_GROUP_DEFAULTS, lambda: manager.INSTANCE_DEFAULTS, ], *args, **kwargs) def test_cluster_create_list_update_delete(self): ctx = context.ctx() cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER) self.assertIsInstance(cluster_db_obj, dict) lst = self.api.cluster_get_all(ctx) self.assertEqual(1, len(lst)) cl_id = lst[0]["id"] updated_cl = self.api.cluster_update( ctx, cl_id, {"is_public": True}) self.assertIsInstance(updated_cl, dict) self.assertEqual(True, updated_cl["is_public"]) self.api.cluster_destroy(ctx, cl_id) lst = self.api.cluster_get_all(ctx) self.assertEqual(0, len(lst)) with testtools.ExpectedException(ex.NotFoundException): self.api.cluster_destroy(ctx, cl_id) def test_duplicate_cluster_create(self): ctx = context.ctx() self.api.cluster_create(ctx, SAMPLE_CLUSTER) with testtools.ExpectedException(ex.DBDuplicateEntry): self.api.cluster_create(ctx, SAMPLE_CLUSTER) def test_cluster_fields(self): ctx = context.ctx() cl_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER) self.assertIsInstance(cl_db_obj, dict) for key, val in SAMPLE_CLUSTER.items(): if key == 'node_groups': # this will be checked separately continue self.assertEqual(val, cl_db_obj.get(key), "Key not found %s" % key) for ng in cl_db_obj["node_groups"]: ng.pop("created_at") ng.pop("updated_at") ng.pop("id") self.assertEqual(cl_db_obj["id"], ng.pop("cluster_id")) ng.pop("image_id") self.assertEqual([], ng.pop("instances")) ng.pop("node_configs") ng.pop("node_group_template_id") ng.pop("volume_mount_prefix") ng.pop("volumes_size") ng.pop("volumes_per_node") ng.pop("volumes_availability_zone") ng.pop("volume_type") ng.pop("floating_ip_pool") ng.pop("boot_from_volume") ng.pop("boot_volume_type") ng.pop("boot_volume_availability_zone") ng.pop("boot_volume_local_to_instance") ng.pop("image_username") ng.pop("open_ports") ng.pop("auto_security_group") ng.pop("is_proxy_gateway") ng.pop("tenant_id") ng.pop("availability_zone") ng.pop('volume_local_to_instance') self.assertEqual(SAMPLE_CLUSTER["node_groups"], cl_db_obj["node_groups"]) def test_cluster_no_ng(self): ctx = context.ctx() cluster_schema = copy.deepcopy(SAMPLE_CLUSTER) cluster_schema.pop('node_groups') cl_db_obj = self.api.cluster_create(ctx, cluster_schema) self.assertIsInstance(cl_db_obj, dict) for key, val in cluster_schema.items(): self.assertEqual(val, cl_db_obj.get(key), 
"Key not found %s" % key) self.assertEqual([], cl_db_obj["node_groups"]) def test_cluster_update_status(self): ctx = context.ctx() cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER) _id = cluster_db_obj["id"] updated_cl = self.api.cluster_update( ctx, _id, {"status": c_u.CLUSTER_STATUS_ACTIVE}) self.assertIsInstance(updated_cl, dict) self.assertEqual(c_u.CLUSTER_STATUS_ACTIVE, updated_cl["status"]) get_cl_obj = self.api.cluster_get(ctx, _id) self.assertEqual(updated_cl, get_cl_obj) with testtools.ExpectedException(ex.NotFoundException): self.api.cluster_update( ctx, "bad_id", {"status": c_u.CLUSTER_STATUS_ACTIVE}) def _ng_in_cluster(self, cluster_db_obj, ng_id): for ng in cluster_db_obj["node_groups"]: if ng["id"] == ng_id: return ng return None def test_add_node_group(self): ctx = context.ctx() cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER) _id = cluster_db_obj["id"] node_group = { "name": "ng_3", "flavor_id": "42", "node_processes": ["p3", "p4"], "count": 5 } ng_id = self.api.node_group_add(ctx, _id, node_group) cluster_db_obj = self.api.cluster_get(ctx, _id) found_ng = self._ng_in_cluster(cluster_db_obj, ng_id) self.assertTrue(found_ng, "New Node Group not found") def test_update_node_group(self): ctx = context.ctx() cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER) _id = cluster_db_obj["id"] self.assertEqual(2, len(cluster_db_obj["node_groups"])) ng_id = cluster_db_obj["node_groups"][-1]["id"] self.api.node_group_update(ctx, ng_id, {"image_id": "test_image"}) cluster_db_obj = self.api.cluster_get(ctx, _id) found_ng = self._ng_in_cluster(cluster_db_obj, ng_id) self.assertTrue(found_ng, "Updated Node Group not found") for ng in cluster_db_obj["node_groups"]: if ng["id"] != ng_id: continue self.assertEqual("test_image", ng["image_id"]) def test_delete_node_group(self): ctx = context.ctx() cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER) _id = cluster_db_obj["id"] ng_id = cluster_db_obj["node_groups"][-1]["id"] self.api.node_group_remove(ctx, ng_id) cluster_db_obj = self.api.cluster_get(ctx, _id) found_ng = self._ng_in_cluster(cluster_db_obj, ng_id) self.assertFalse(found_ng, "Node Group is still in a CLuster") with testtools.ExpectedException(ex.NotFoundException): self.api.node_group_remove(ctx, ng_id) def _add_instance(self, ctx, ng_id): instance = { "instance_name": "additional_vm" } return self.api.instance_add(ctx, ng_id, instance) def _add_instance_ipv6(self, ctx, ng_id, instance_name): instance = { "instance_name": instance_name, "internal_ip": "FE80:0000:0000:0000:0202:B3FF:FE1E:8329", "management_ip": "FE80:0000:0000:0000:0202:B3FF:FE1E:8329" } return self.api.instance_add(ctx, ng_id, instance) def test_add_instance(self): ctx = context.ctx() cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER) _id = cluster_db_obj["id"] ng_id = cluster_db_obj["node_groups"][-1]["id"] count = cluster_db_obj["node_groups"][-1]["count"] self._add_instance(ctx, ng_id) cluster_db_obj = self.api.cluster_get(ctx, _id) for ng in cluster_db_obj["node_groups"]: if ng["id"] != ng_id: continue ng.pop('tenant_id') self.assertEqual(count + 1, ng["count"]) self.assertEqual("additional_vm", ng["instances"][0]["instance_name"]) def test_add_instance_ipv6(self): ctx = context.ctx() cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER) _id = cluster_db_obj["id"] ng_id = cluster_db_obj["node_groups"][-1]["id"] count = cluster_db_obj["node_groups"][-1]["count"] instance_name = "additional_vm_ipv6" self._add_instance_ipv6(ctx, ng_id, instance_name) 
cluster_db_obj = self.api.cluster_get(ctx, _id) for ng in cluster_db_obj["node_groups"]: if ng["id"] != ng_id: continue ng.pop('tenant_id') self.assertEqual(count + 1, ng["count"]) self.assertEqual(instance_name, ng["instances"][0]["instance_name"]) def test_update_instance(self): ctx = context.ctx() cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER) _id = cluster_db_obj["id"] ng_id = cluster_db_obj["node_groups"][-1]["id"] instance_id = self._add_instance(ctx, ng_id) self.api.instance_update(ctx, instance_id, {"management_ip": "1.1.1.1"}) cluster_db_obj = self.api.cluster_get(ctx, _id) for ng in cluster_db_obj["node_groups"]: if ng["id"] != ng_id: continue self.assertEqual("1.1.1.1", ng["instances"][0]["management_ip"]) def test_update_instance_ipv6(self): ctx = context.ctx() ip = "FE80:0000:0000:0000:0202:B3FF:FE1E:8329" cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER) _id = cluster_db_obj["id"] ng_id = cluster_db_obj["node_groups"][-1]["id"] instance_id = self._add_instance(ctx, ng_id) self.api.instance_update(ctx, instance_id, {"management_ip": ip}) cluster_db_obj = self.api.cluster_get(ctx, _id) for ng in cluster_db_obj["node_groups"]: if ng["id"] != ng_id: continue self.assertEqual(ip, ng["instances"][0]["management_ip"]) def test_remove_instance(self): ctx = context.ctx() cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER) _id = cluster_db_obj["id"] ng_id = cluster_db_obj["node_groups"][-1]["id"] count = cluster_db_obj["node_groups"][-1]["count"] instance_id = self._add_instance(ctx, ng_id) cluster_db_obj = self.api.cluster_get(ctx, _id) for ng in cluster_db_obj["node_groups"]: if ng["id"] != ng_id: continue self.assertEqual(count + 1, ng["count"]) self.api.instance_remove(ctx, instance_id) cluster_db_obj = self.api.cluster_get(ctx, _id) for ng in cluster_db_obj["node_groups"]: if ng["id"] != ng_id: continue self.assertEqual(count, ng["count"]) with testtools.ExpectedException(ex.NotFoundException): self.api.instance_remove(ctx, instance_id) def test_cluster_search(self): ctx = context.ctx() vals = copy.deepcopy(SAMPLE_CLUSTER) vals['name'] = "test_name" self.api.cluster_create(ctx, vals) lst = self.api.cluster_get_all(ctx) self.assertEqual(1, len(lst)) kwargs = {'name': vals['name'], 'plugin_name': vals['plugin_name']} lst = self.api.cluster_get_all(ctx, **kwargs) self.assertEqual(1, len(lst)) # Valid field but no matching value kwargs = {'name': vals['name']+'foo'} lst = self.api.cluster_get_all(ctx, **kwargs) self.assertEqual(0, len(lst)) # Valid field with substrings kwargs = {'name': 'test'} lst = self.api.cluster_get_all(ctx, **kwargs) self.assertEqual(0, len(lst)) # Invalid field self.assertRaises(sa_exc.InvalidRequestError, self.api.cluster_get_all, ctx, **{'badfield': 'somevalue'}) @mock.patch('sahara.db.sqlalchemy.api.regex_filter') def test_cluster_search_regex(self, regex_filter): # do this so we can return the correct value def _regex_filter(query, cls, regex_cols, search_opts): return query, search_opts regex_filter.side_effect = _regex_filter ctx = context.ctx() self.api.cluster_get_all(ctx) self.assertEqual(0, regex_filter.call_count) self.api.cluster_get_all(ctx, regex_search=True, name="fox") self.assertEqual(1, regex_filter.call_count) args, kwargs = regex_filter.call_args self.assertIs(args[1], m.Cluster) self.assertEqual(args[2], ["name", "description", "plugin_name", "tenant_id"]) self.assertEqual(args[3], {"name": "fox"}) @mock.patch("sahara.service.edp.utils.shares.mount_shares") def test_cluster_update_shares(self, 
mount_shares): ctx = context.ctx() cluster_db_obj = self.api.cluster_create(ctx, SAMPLE_CLUSTER) _id = cluster_db_obj["id"] test_shares = [ { "id": "bd71d2d5-60a0-4ed9-a3d2-ad312c368880", "path": "/mnt/manila", "access_level": "rw" } ] updated_cl = self.api.cluster_update(ctx, _id, {"shares": test_shares}) self.assertIsInstance(updated_cl, dict) self.assertEqual(test_shares, updated_cl["shares"]) get_cl_obj = self.api.cluster_get(ctx, _id) self.assertEqual(updated_cl, get_cl_obj) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/conductor/manager/test_defaults.py0000664000175000017500000000564200000000000025301 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import six from sahara.conductor import manager from sahara import context import sahara.tests.unit.conductor.base as test_base from sahara.tests.unit.conductor.manager import test_clusters from sahara.utils import general class DefaultsTest(test_base.ConductorManagerTestCase): def __init__(self, *args, **kwargs): super(DefaultsTest, self).__init__( checks=[ lambda: test_clusters.SAMPLE_CLUSTER, lambda: manager.CLUSTER_DEFAULTS, lambda: manager.NODE_GROUP_DEFAULTS, lambda: manager.INSTANCE_DEFAULTS, ], *args, **kwargs) def _assert_props(self, obj, **rules): for k, v in six.iteritems(rules): self.assertIn(k, obj) self.assertEqual(v, obj[k]) def test_apply_defaults(self): self.assertEqual( {"a": 1, "b": 2, "c": 0}, manager._apply_defaults({"c": 0, "b": 2}, {"a": 1, "b": 2, "c": 3})) def _create_sample_cluster(self): ctx = context.ctx() cluster = self.api.cluster_create(ctx, test_clusters.SAMPLE_CLUSTER) self.assertIsInstance(cluster, dict) return cluster def test_cluster_defaults(self): cluster = self._create_sample_cluster() self._assert_props(cluster, status="undefined", status_description="", info={}) def test_node_group_defaults(self): cluster = self._create_sample_cluster() for ng in cluster['node_groups']: self._assert_props(ng, node_configs={}, volumes_per_node=0, volumes_size=0, volume_mount_prefix="/volumes/disk") def test_instance_defaults(self): ctx = context.ctx() cluster = self._create_sample_cluster() cluster_id = cluster["id"] ng_id = cluster["node_groups"][-1]["id"] self.api.instance_add(ctx, ng_id, { "instance_name": "vm123" }) cluster = self.api.cluster_get(ctx, cluster_id) ng = general.find_dict(cluster['node_groups'], id=ng_id) instance = general.find_dict(ng['instances'], instance_name="vm123") self._assert_props(instance, volumes=[]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/conductor/manager/test_edp.py0000664000175000017500000012047200000000000024241 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import datetime from unittest import mock from sqlalchemy import exc as sa_exc import testtools from sahara import context from sahara.db.sqlalchemy import models as m from sahara import exceptions as ex from sahara.service.castellan import config as castellan import sahara.tests.unit.conductor.base as test_base from sahara.tests.unit.conductor.manager import test_clusters from sahara.utils import edp SAMPLE_DATA_SOURCE = { "tenant_id": "tenant_1", "name": "ngt_test", "description": "test_desc", "type": "Cassandra", "url": "localhost:1080", "credentials": { "user": "test", "password": "123" }, "is_public": False, "is_protected": False } SAMPLE_JOB = { "tenant_id": "tenant_1", "name": "job_test", "description": "test_desc", "type": edp.JOB_TYPE_PIG, "mains": [], "is_public": False, "is_protected": False } SAMPLE_JOB_EXECUTION = { "tenant_id": "tenant_1", "return_code": "1", "job_id": "undefined", "input_id": "undefined", "output_id": "undefined", "start_time": datetime.datetime.now(), "cluster_id": None, "is_public": False, "is_protected": False } SAMPLE_CONF_JOB_EXECUTION = { "tenant_id": "tenant_1", "progress": "0.1", "return_code": "1", "job_id": "undefined", "input_id": "undefined", "output_id": "undefined", "cluster_id": None, "job_configs": { "conf2": "value_je", "conf3": "value_je" } } BINARY_DATA = b"vU}\x97\x1c\xdf\xa686\x08\xf2\tf\x0b\xb1}" SAMPLE_JOB_BINARY_INTERNAL = { "tenant_id": "test_tenant", "name": "job_test", "data": BINARY_DATA, "is_public": False, "is_protected": False } SAMPLE_JOB_BINARY = { "tenant_id": "test_tenant", "name": "job_binary_test", "description": "test_dec", "url": "internal-db://test_binary", "is_public": False, "is_protected": False } SAMPLE_JOB_BINARY_UPDATE = { "name": "updatedName", "url": "internal-db://updated-fake-url" } SAMPLE_JOB_BINARY_SWIFT = { "tenant_id": "test_tenant", "name": "job_binary_test_swift", "description": "the description", "url": "swift://test_swift_url", } SAMPLE_JOB_BINARY_SWIFT_UPDATE = { "name": "SwifterName", "url": "swift://updated-swift" } class DataSourceTest(test_base.ConductorManagerTestCase): def __init__(self, *args, **kwargs): super(DataSourceTest, self).__init__( checks=[ lambda: SAMPLE_DATA_SOURCE ], *args, **kwargs) def setUp(self): super(DataSourceTest, self).setUp() castellan.validate_config() def test_crud_operation_create_list_delete(self): ctx = context.ctx() self.api.data_source_create(ctx, SAMPLE_DATA_SOURCE) lst = self.api.data_source_get_all(ctx) self.assertEqual(1, len(lst)) ds_id = lst[0]['id'] self.api.data_source_destroy(ctx, ds_id) lst = self.api.data_source_get_all(ctx) self.assertEqual(0, len(lst)) def test_duplicate_data_source_create(self): ctx = context.ctx() self.api.data_source_create(ctx, SAMPLE_DATA_SOURCE) with testtools.ExpectedException(ex.DBDuplicateEntry): self.api.data_source_create(ctx, SAMPLE_DATA_SOURCE) def test_data_source_fields(self): ctx = context.ctx() ctx.tenant_id = SAMPLE_DATA_SOURCE['tenant_id'] ds_db_obj_id = self.api.data_source_create(ctx, SAMPLE_DATA_SOURCE)['id'] ds_db_obj = self.api.data_source_get(ctx, ds_db_obj_id) self.assertIsInstance(ds_db_obj, 
dict) for key, val in SAMPLE_DATA_SOURCE.items(): self.assertEqual(val, ds_db_obj.get(key), "Key not found %s" % key) def test_data_source_delete(self): ctx = context.ctx() db_obj_ds = self.api.data_source_create(ctx, SAMPLE_DATA_SOURCE) _id = db_obj_ds['id'] self.api.data_source_destroy(ctx, _id) with testtools.ExpectedException(ex.NotFoundException): self.api.data_source_destroy(ctx, _id) def test_data_source_search(self): ctx = context.ctx() ctx.tenant_id = SAMPLE_DATA_SOURCE['tenant_id'] self.api.data_source_create(ctx, SAMPLE_DATA_SOURCE) lst = self.api.data_source_get_all(ctx) self.assertEqual(1, len(lst)) kwargs = {'name': SAMPLE_DATA_SOURCE['name'], 'tenant_id': SAMPLE_DATA_SOURCE['tenant_id']} lst = self.api.data_source_get_all(ctx, **kwargs) self.assertEqual(1, len(lst)) # Valid field but no matching value kwargs = {'name': SAMPLE_DATA_SOURCE['name']+"foo"} lst = self.api.data_source_get_all(ctx, **kwargs) self.assertEqual(0, len(lst)) # Valid field with substrings kwargs = {'name': 'ngt', 'tenant_id': SAMPLE_DATA_SOURCE['tenant_id']} lst = self.api.data_source_get_all(ctx, **kwargs) self.assertEqual(0, len(lst)) # Invalid field self.assertRaises(sa_exc.InvalidRequestError, self.api.data_source_get_all, ctx, **{'badfield': 'somevalue'}) @mock.patch('sahara.db.sqlalchemy.api.regex_filter') def test_data_source_search_regex(self, regex_filter): # do this so we can return the correct value def _regex_filter(query, cls, regex_cols, search_opts): return query, search_opts regex_filter.side_effect = _regex_filter ctx = context.ctx() self.api.data_source_get_all(ctx) self.assertEqual(0, regex_filter.call_count) self.api.data_source_get_all(ctx, regex_search=True, name="fox") self.assertEqual(1, regex_filter.call_count) args, kwargs = regex_filter.call_args self.assertIs(args[1], m.DataSource) self.assertEqual(args[2], ["name", "description", "url"]) self.assertEqual(args[3], {"name": "fox"}) def test_data_source_count_in(self): ctx = context.ctx() ctx.tenant_id = SAMPLE_DATA_SOURCE['tenant_id'] src = copy.copy(SAMPLE_DATA_SOURCE) self.api.data_source_create(ctx, src) cnt = self.api.data_source_count(ctx, name='ngt_test') self.assertEqual(1, cnt) cnt = self.api.data_source_count(ctx, name=('ngt_test', 'test2', 'test3')) self.assertEqual(1, cnt) cnt = self.api.data_source_count(ctx, name=('test1', 'test2', 'test3')) self.assertEqual(0, cnt) lst = self.api.data_source_get_all(ctx, name='ngt_test') myid = lst[0]['id'] cnt = self.api.data_source_count(ctx, name=('ngt_test', 'test2', 'test3'), id=myid) self.assertEqual(1, cnt) cnt = self.api.data_source_count(ctx, name=('ngt_test', 'test2', 'test3'), id=(myid, '2')) self.assertEqual(1, cnt) def test_data_source_count_like(self): ctx = context.ctx() ctx.tenant_id = SAMPLE_DATA_SOURCE['tenant_id'] src = copy.copy(SAMPLE_DATA_SOURCE) self.api.data_source_create(ctx, src) cnt = self.api.data_source_count(ctx, name='ngt_test') self.assertEqual(1, cnt) cnt = self.api.data_source_count(ctx, name='ngt%') self.assertEqual(1, cnt) cnt = self.api.data_source_count(ctx, name=('ngt_test',), url='localhost%') self.assertEqual(1, cnt) cnt = self.api.data_source_count(ctx, name=('ngt_test',), url='localhost') self.assertEqual(0, cnt) def test_data_source_update(self): ctx = context.ctx() orig = self.api.data_source_create(ctx, SAMPLE_DATA_SOURCE) update_json = {"name": "updatedName", "url": "swift://updatedFakeUrl"} updated = self.api.data_source_update(ctx, orig["id"], update_json) self.assertEqual("updatedName", updated["name"]) 
self.assertEqual("swift://updatedFakeUrl", updated["url"]) def test_ds_update_delete_when_protected(self): ctx = context.ctx() sample = copy.deepcopy(SAMPLE_DATA_SOURCE) sample['is_protected'] = True ds = self.api.data_source_create(ctx, sample) ds_id = ds["id"] with testtools.ExpectedException(ex.UpdateFailedException): try: self.api.data_source_update(ctx, ds_id, {"name": "ds"}) except ex.UpdateFailedException as e: self.assert_protected_resource_exception(e) raise e with testtools.ExpectedException(ex.DeletionFailed): try: self.api.data_source_destroy(ctx, ds_id) except ex.DeletionFailed as e: self.assert_protected_resource_exception(e) raise e self.api.data_source_update(ctx, ds_id, {"name": "ds", "is_protected": False}) def test_public_ds_update_delete_from_another_tenant(self): ctx = context.ctx() sample = copy.deepcopy(SAMPLE_DATA_SOURCE) sample['is_public'] = True ds = self.api.data_source_create(ctx, sample) ds_id = ds["id"] ctx.tenant_id = 'tenant_2' with testtools.ExpectedException(ex.UpdateFailedException): try: self.api.data_source_update(ctx, ds_id, {"name": "ds"}) except ex.UpdateFailedException as e: self.assert_created_in_another_tenant_exception(e) raise e with testtools.ExpectedException(ex.DeletionFailed): try: self.api.data_source_destroy(ctx, ds_id) except ex.DeletionFailed as e: self.assert_created_in_another_tenant_exception(e) raise e class JobExecutionTest(test_base.ConductorManagerTestCase): def setUp(self): super(JobExecutionTest, self).setUp() castellan.validate_config() def test_crud_operation_create_list_delete_update(self): ctx = context.ctx() job = self.api.job_create(ctx, SAMPLE_JOB) ds_input = self.api.data_source_create(ctx, SAMPLE_DATA_SOURCE) SAMPLE_DATA_OUTPUT = copy.copy(SAMPLE_DATA_SOURCE) SAMPLE_DATA_OUTPUT['name'] = 'output' ds_output = self.api.data_source_create(ctx, SAMPLE_DATA_OUTPUT) SAMPLE_JOB_EXECUTION['job_id'] = job['id'] SAMPLE_JOB_EXECUTION['input_id'] = ds_input['id'] SAMPLE_JOB_EXECUTION['output_id'] = ds_output['id'] self.api.job_execution_create(ctx, SAMPLE_JOB_EXECUTION) lst = self.api.job_execution_get_all(ctx) self.assertEqual(1, len(lst)) count = self.api.job_execution_count(ctx) self.assertEqual(1, count) job_ex_id = lst[0]['id'] self.assertIsNone(lst[0]['info']) new_info = {"status": edp.JOB_STATUS_PENDING} self.api.job_execution_update(ctx, job_ex_id, {'info': new_info}) updated_job = self.api.job_execution_get(ctx, job_ex_id) self.assertEqual(new_info, updated_job['info']) self.assertEqual(SAMPLE_JOB_EXECUTION['start_time'], updated_job['start_time']) self.api.job_execution_destroy(ctx, job_ex_id) with testtools.ExpectedException(ex.NotFoundException): self.api.job_execution_update(ctx, job_ex_id, {'info': new_info}) with testtools.ExpectedException(ex.NotFoundException): self.api.job_execution_destroy(ctx, job_ex_id) lst = self.api.job_execution_get_all(ctx) self.assertEqual(0, len(lst)) def test_crud_operation_on_configured_jobs(self): ctx = context.ctx() job = self.api.job_create(ctx, SAMPLE_JOB) ds_input = self.api.data_source_create(ctx, SAMPLE_DATA_SOURCE) SAMPLE_DATA_OUTPUT = copy.copy(SAMPLE_DATA_SOURCE) SAMPLE_DATA_OUTPUT['name'] = 'output' ds_output = self.api.data_source_create(ctx, SAMPLE_DATA_OUTPUT) SAMPLE_CONF_JOB_EXECUTION['job_id'] = job['id'] SAMPLE_CONF_JOB_EXECUTION['input_id'] = ds_input['id'] SAMPLE_CONF_JOB_EXECUTION['output_id'] = ds_output['id'] self.api.job_execution_create(ctx, SAMPLE_CONF_JOB_EXECUTION) lst = self.api.job_execution_get_all(ctx) self.assertEqual(1, len(lst)) job_ex = lst[0] configs 
= { 'conf2': 'value_je', 'conf3': 'value_je' } self.assertEqual(configs, job_ex['job_configs']) def test_null_data_sources(self): ctx = context.ctx() job = self.api.job_create(ctx, SAMPLE_JOB) SAMPLE_CONF_JOB_EXECUTION['job_id'] = job['id'] SAMPLE_CONF_JOB_EXECUTION['input_id'] = None SAMPLE_CONF_JOB_EXECUTION['output_id'] = None id = self.api.job_execution_create(ctx, SAMPLE_CONF_JOB_EXECUTION)['id'] job_exec = self.api.job_execution_get(ctx, id) self.assertIsNone(job_exec['input_id']) self.assertIsNone(job_exec['output_id']) def test_deletion_constraints_on_data_and_jobs(self): ctx = context.ctx() job = self.api.job_create(ctx, SAMPLE_JOB) ds_input = self.api.data_source_create(ctx, SAMPLE_DATA_SOURCE) SAMPLE_DATA_OUTPUT = copy.copy(SAMPLE_DATA_SOURCE) SAMPLE_DATA_OUTPUT['name'] = 'output' ds_output = self.api.data_source_create(ctx, SAMPLE_DATA_OUTPUT) SAMPLE_CONF_JOB_EXECUTION['job_id'] = job['id'] SAMPLE_CONF_JOB_EXECUTION['input_id'] = ds_input['id'] SAMPLE_CONF_JOB_EXECUTION['output_id'] = ds_output['id'] self.api.job_execution_create(ctx, SAMPLE_CONF_JOB_EXECUTION) with testtools.ExpectedException(ex.DeletionFailed): self.api.data_source_destroy(ctx, ds_input['id']) with testtools.ExpectedException(ex.DeletionFailed): self.api.data_source_destroy(ctx, ds_output['id']) with testtools.ExpectedException(ex.DeletionFailed): self.api.job_destroy(ctx, job['id']) def test_job_execution_search(self): ctx = context.ctx() jvals = copy.copy(SAMPLE_JOB) jvals["name"] = "frederica" job = self.api.job_create(ctx, jvals) ds_input = self.api.data_source_create(ctx, SAMPLE_DATA_SOURCE) SAMPLE_DATA_OUTPUT = copy.copy(SAMPLE_DATA_SOURCE) SAMPLE_DATA_OUTPUT['name'] = 'output' ds_output = self.api.data_source_create(ctx, SAMPLE_DATA_OUTPUT) job_exec = copy.copy(SAMPLE_JOB_EXECUTION) job_exec['job_id'] = job['id'] job_exec['input_id'] = ds_input['id'] job_exec['output_id'] = ds_output['id'] ctx.tenant_id = job_exec['tenant_id'] self.api.job_execution_create(ctx, job_exec) lst = self.api.job_execution_get_all(ctx) self.assertEqual(1, len(lst)) kwargs = {'tenant_id': job_exec['tenant_id']} lst = self.api.job_execution_get_all(ctx, **kwargs) self.assertEqual(1, len(lst)) # Valid field but no matching value kwargs = {'job_id': job_exec['job_id']+"foo"} lst = self.api.job_execution_get_all(ctx, **kwargs) self.assertEqual(0, len(lst)) # Valid field with substrings kwargs = {'job.name': "red"} lst = self.api.job_execution_get_all(ctx, **kwargs) self.assertEqual(0, len(lst)) # Invalid field self.assertRaises(sa_exc.InvalidRequestError, self.api.job_execution_get_all, ctx, **{'badfield': 'somevalue'}) @mock.patch('sahara.db.sqlalchemy.api.regex_filter') def test_job_execution_search_regex(self, regex_filter): # do this so we can return the correct value def _regex_filter(query, cls, regex_cols, search_opts): return query, search_opts regex_filter.side_effect = _regex_filter ctx = context.ctx() self.api.job_execution_get_all(ctx) self.assertEqual(0, regex_filter.call_count) self.api.job_execution_get_all(ctx, regex_search=True, **{"job.name": "fox", "cluster.name": "jack", "id": "124"}) self.assertEqual(3, regex_filter.call_count) # First call, after externals were removed args, kwargs = regex_filter.call_args_list[0] self.assertIs(args[1], m.JobExecution) self.assertEqual(args[2], ["job.name", "cluster.name"]) self.assertEqual(args[3], {"id": "124"}) # Second call, looking for cluster.name args, kwargs = regex_filter.call_args_list[1] self.assertIs(args[1], m.Cluster) self.assertEqual(args[2], ["name"]) 
self.assertEqual(args[3], {"name": "jack"}) # Third call, looking for job.name args, kwargs = regex_filter.call_args_list[2] self.assertIs(args[1], m.Job) self.assertEqual(args[2], ["name"]) self.assertEqual(args[3], {"name": "fox"}) def test_job_execution_advanced_search(self): ctx = context.ctx() job = self.api.job_create(ctx, SAMPLE_JOB) ds_input = self.api.data_source_create(ctx, SAMPLE_DATA_SOURCE) SAMPLE_DATA_OUTPUT = copy.copy(SAMPLE_DATA_SOURCE) SAMPLE_DATA_OUTPUT['name'] = 'output' ds_output = self.api.data_source_create(ctx, SAMPLE_DATA_OUTPUT) # Create a cluster cl1 = self.api.cluster_create(ctx, test_clusters.SAMPLE_CLUSTER) # Create a second cluster with a different name cl2_vals = copy.copy(test_clusters.SAMPLE_CLUSTER) cl2_vals['name'] = 'test_cluster2' cl2 = self.api.cluster_create(ctx, cl2_vals) my_sample_job_exec = copy.copy(SAMPLE_JOB_EXECUTION) my_sample_job_exec['job_id'] = job['id'] my_sample_job_exec['input_id'] = ds_input['id'] my_sample_job_exec['output_id'] = ds_output['id'] my_sample_job_exec['cluster_id'] = cl1['id'] # Run job on cluster 1 self.api.job_execution_create(ctx, my_sample_job_exec) # Run the same job on cluster 2 and set status my_sample_job_exec['cluster_id'] = cl2['id'] my_sample_job_exec['info'] = {'status': 'KiLLeD'} self.api.job_execution_create(ctx, my_sample_job_exec) # Search only with job execution fields (finds both) lst = self.api.job_execution_get_all(ctx, **{'return_code': 1}) self.assertEqual(2, len(lst)) # Search on cluster name kwargs = {'cluster.name': cl1['name'], 'return_code': 1} lst = self.api.job_execution_get_all(ctx, **kwargs) self.assertEqual(1, len(lst)) # Search on cluster name and job name kwargs = {'cluster.name': cl1['name'], 'job.name': SAMPLE_JOB['name'], 'return_code': 1} lst = self.api.job_execution_get_all(ctx, **kwargs) self.assertEqual(1, len(lst)) # Search on cluster name, job name, and status kwargs = {'cluster.name': cl2['name'], 'job.name': SAMPLE_JOB['name'], 'status': 'killed', 'return_code': 1} lst = self.api.job_execution_get_all(ctx, **kwargs) self.assertEqual(1, len(lst)) # Search on job name (finds both) kwargs = {'job.name': SAMPLE_JOB['name'], 'return_code': 1} lst = self.api.job_execution_get_all(ctx, **kwargs) self.assertEqual(2, len(lst)) # invalid cluster name value kwargs = {'cluster.name': cl1['name']+'foo', 'job.name': SAMPLE_JOB['name']} lst = self.api.job_execution_get_all(ctx, **kwargs) self.assertEqual(0, len(lst)) # invalid job name value kwargs = {'cluster.name': cl1['name'], 'job.name': SAMPLE_JOB['name']+'foo'} lst = self.api.job_execution_get_all(ctx, **kwargs) self.assertEqual(0, len(lst)) # invalid status value kwargs = {'cluster.name': cl1['name'], 'status': 'PENDING'} lst = self.api.job_execution_get_all(ctx, **kwargs) self.assertEqual(0, len(lst)) class JobTest(test_base.ConductorManagerTestCase): def __init__(self, *args, **kwargs): super(JobTest, self).__init__( checks=[ lambda: SAMPLE_JOB ], *args, **kwargs) def test_crud_operation_create_list_delete_update(self): ctx = context.ctx() self.api.job_create(ctx, SAMPLE_JOB) lst = self.api.job_get_all(ctx) self.assertEqual(1, len(lst)) jo_id = lst[0]['id'] update_jo = self.api.job_update(ctx, jo_id, {'description': 'update'}) self.assertEqual('update', update_jo['description']) self.api.job_destroy(ctx, jo_id) lst = self.api.job_get_all(ctx) self.assertEqual(0, len(lst)) with testtools.ExpectedException(ex.NotFoundException): self.api.job_destroy(ctx, jo_id) def test_job_fields(self): ctx = context.ctx() ctx.tenant_id = 
SAMPLE_JOB['tenant_id'] job_id = self.api.job_create(ctx, SAMPLE_JOB)['id'] job = self.api.job_get(ctx, job_id) self.assertIsInstance(job, dict) for key, val in SAMPLE_JOB.items(): self.assertEqual(val, job.get(key), "Key not found %s" % key) def test_job_search(self): ctx = context.ctx() job = copy.copy(SAMPLE_JOB) job["name"] = "frederica" job["description"] = "thebestjob" ctx.tenant_id = job['tenant_id'] self.api.job_create(ctx, job) lst = self.api.job_get_all(ctx) self.assertEqual(1, len(lst)) kwargs = {'name': job['name'], 'tenant_id': job['tenant_id']} lst = self.api.job_get_all(ctx, **kwargs) self.assertEqual(1, len(lst)) # Valid field but no matching value lst = self.api.job_get_all(ctx, **{'name': job['name']+"foo"}) self.assertEqual(0, len(lst)) # Valid field with substrings kwargs = {'name': "red", 'description': "best"} lst = self.api.job_get_all(ctx, **kwargs) self.assertEqual(0, len(lst)) # Invalid field self.assertRaises(sa_exc.InvalidRequestError, self.api.job_get_all, ctx, **{'badfield': 'somevalue'}) @mock.patch('sahara.db.sqlalchemy.api.regex_filter') def test_job_search_regex(self, regex_filter): # do this so we can return the correct value def _regex_filter(query, cls, regex_cols, search_opts): return query, search_opts regex_filter.side_effect = _regex_filter ctx = context.ctx() self.api.job_get_all(ctx) self.assertEqual(0, regex_filter.call_count) self.api.job_get_all(ctx, regex_search=True, name="fox") self.assertEqual(1, regex_filter.call_count) args, kwargs = regex_filter.call_args self.assertIs(args[1], m.Job) self.assertEqual(args[2], ["name", "description"]) self.assertEqual(args[3], {"name": "fox"}) def test_job_update_delete_when_protected(self): ctx = context.ctx() sample = copy.deepcopy(SAMPLE_JOB) sample['is_protected'] = True job = self.api.job_create(ctx, sample) job_id = job["id"] with testtools.ExpectedException(ex.UpdateFailedException): try: self.api.job_update(ctx, job_id, {"name": "job"}) except ex.UpdateFailedException as e: self.assert_protected_resource_exception(e) raise e with testtools.ExpectedException(ex.DeletionFailed): try: self.api.job_destroy(ctx, job_id) except ex.DeletionFailed as e: self.assert_protected_resource_exception(e) raise e self.api.job_update(ctx, job_id, {"name": "job", "is_protected": False}) def test_public_job_update_delete_from_another_tenant(self): ctx = context.ctx() sample = copy.deepcopy(SAMPLE_JOB) sample['is_public'] = True job = self.api.job_create(ctx, sample) job_id = job["id"] ctx.tenant_id = 'tenant_2' with testtools.ExpectedException(ex.UpdateFailedException): try: self.api.job_update(ctx, job_id, {"name": "job"}) except ex.UpdateFailedException as e: self.assert_created_in_another_tenant_exception(e) raise e with testtools.ExpectedException(ex.DeletionFailed): try: self.api.job_destroy(ctx, job_id) except ex.DeletionFailed as e: self.assert_created_in_another_tenant_exception(e) raise e class JobBinaryInternalTest(test_base.ConductorManagerTestCase): def __init__(self, *args, **kwargs): super(JobBinaryInternalTest, self).__init__( checks=[ lambda: SAMPLE_JOB_BINARY_INTERNAL ], *args, **kwargs) def test_crud_operation_create_list_delete_update(self): ctx = context.ctx() self.api.job_binary_internal_create(ctx, SAMPLE_JOB_BINARY_INTERNAL) lst = self.api.job_binary_internal_get_all(ctx) self.assertEqual(1, len(lst)) job_bin_int_id = lst[0]['id'] update_jbi = self.api.job_binary_internal_update( ctx, job_bin_int_id, {'name': 'newname'}) self.assertEqual('newname', update_jbi['name']) 
self.api.job_binary_internal_destroy(ctx, job_bin_int_id) lst = self.api.job_binary_internal_get_all(ctx) self.assertEqual(0, len(lst)) with testtools.ExpectedException(ex.NotFoundException): self.api.job_binary_internal_destroy(ctx, job_bin_int_id) def test_duplicate_job_binary_internal_create(self): ctx = context.ctx() self.api.job_binary_internal_create(ctx, SAMPLE_JOB_BINARY_INTERNAL) with testtools.ExpectedException(ex.DBDuplicateEntry): self.api.job_binary_internal_create(ctx, SAMPLE_JOB_BINARY_INTERNAL) def test_job_binary_internal_get_raw(self): ctx = context.ctx() id = self.api.job_binary_internal_create(ctx, SAMPLE_JOB_BINARY_INTERNAL )['id'] data = self.api.job_binary_internal_get_raw_data(ctx, id) self.assertEqual(SAMPLE_JOB_BINARY_INTERNAL["data"], data) self.api.job_binary_internal_destroy(ctx, id) data = self.api.job_binary_internal_get_raw_data(ctx, id) self.assertIsNone(data) def test_job_binary_internal_fields(self): ctx = context.ctx() ctx.tenant_id = SAMPLE_JOB_BINARY_INTERNAL['tenant_id'] id = self.api.job_binary_internal_create( ctx, SAMPLE_JOB_BINARY_INTERNAL)['id'] internal = self.api.job_binary_internal_get(ctx, id) self.assertIsInstance(internal, dict) with testtools.ExpectedException(KeyError): internal["data"] internal["data"] = self.api.job_binary_internal_get_raw_data(ctx, id) for key, val in SAMPLE_JOB_BINARY_INTERNAL.items(): if key == "datasize": self.assertEqual(len(BINARY_DATA), internal["datasize"]) else: self.assertEqual(val, internal.get(key), "Key not found %s" % key) def test_job_binary_internal_search(self): ctx = context.ctx() jbi = copy.copy(SAMPLE_JOB_BINARY_INTERNAL) jbi["name"] = "frederica" ctx.tenant_id = jbi['tenant_id'] self.api.job_binary_internal_create(ctx, jbi) lst = self.api.job_binary_internal_get_all(ctx) self.assertEqual(1, len(lst)) kwargs = {'name': jbi['name'], 'tenant_id': jbi['tenant_id']} lst = self.api.job_binary_internal_get_all(ctx, **kwargs) self.assertEqual(1, len(lst)) # Valid field but no matching value kwargs = {'name': jbi['name']+"foo"} lst = self.api.job_binary_internal_get_all(ctx, **kwargs) self.assertEqual(0, len(lst)) # Valid field with substrings kwargs = {'name': "red"} lst = self.api.job_binary_internal_get_all(ctx, **kwargs) self.assertEqual(0, len(lst)) # Invalid field self.assertRaises(sa_exc.InvalidRequestError, self.api.job_binary_internal_get_all, ctx, **{'badfield': 'junk'}) @mock.patch('sahara.db.sqlalchemy.api.regex_filter') def test_job_binary_internal_search_regex(self, regex_filter): # do this so we can return the correct value def _regex_filter(query, cls, regex_cols, search_opts): return query, search_opts regex_filter.side_effect = _regex_filter ctx = context.ctx() self.api.job_binary_internal_get_all(ctx) self.assertEqual(0, regex_filter.call_count) self.api.job_binary_internal_get_all(ctx, regex_search=True, name="fox") self.assertEqual(1, regex_filter.call_count) args, kwargs = regex_filter.call_args self.assertIs(args[1], m.JobBinaryInternal) self.assertEqual(args[2], ["name"]) self.assertEqual(args[3], {"name": "fox"}) def test_jbi_update_delete_when_protected(self): ctx = context.ctx() sample = copy.deepcopy(SAMPLE_JOB_BINARY_INTERNAL) sample['is_protected'] = True jbi = self.api.job_binary_internal_create(ctx, sample) jbi_id = jbi["id"] with testtools.ExpectedException(ex.UpdateFailedException): try: self.api.job_binary_internal_update(ctx, jbi_id, {"name": "jbi"}) except ex.UpdateFailedException as e: self.assert_protected_resource_exception(e) raise e with 
testtools.ExpectedException(ex.DeletionFailed): try: self.api.job_binary_internal_destroy(ctx, jbi_id) except ex.DeletionFailed as e: self.assert_protected_resource_exception(e) raise e self.api.job_binary_internal_update(ctx, jbi_id, {"name": "jbi", "is_protected": False}) def test_public_jbi_update_delete_from_another_tenant(self): ctx = context.ctx() sample = copy.deepcopy(SAMPLE_JOB_BINARY_INTERNAL) sample['is_public'] = True jbi = self.api.job_binary_internal_create(ctx, sample) jbi_id = jbi["id"] ctx.tenant_id = 'tenant_2' with testtools.ExpectedException(ex.UpdateFailedException): try: self.api.job_binary_internal_update(ctx, jbi_id, {"name": "jbi"}) except ex.UpdateFailedException as e: self.assert_created_in_another_tenant_exception(e) raise e with testtools.ExpectedException(ex.DeletionFailed): try: self.api.job_binary_internal_destroy(ctx, jbi_id) except ex.DeletionFailed as e: self.assert_created_in_another_tenant_exception(e) raise e class JobBinaryTest(test_base.ConductorManagerTestCase): def __init__(self, *args, **kwargs): super(JobBinaryTest, self).__init__( checks=[ lambda: SAMPLE_JOB_BINARY ], *args, **kwargs) def setUp(self): super(JobBinaryTest, self).setUp() castellan.validate_config() def test_crud_operation_create_list_delete(self): ctx = context.ctx() self.api.job_binary_create(ctx, SAMPLE_JOB_BINARY) lst = self.api.job_binary_get_all(ctx) self.assertEqual(1, len(lst)) job_binary_id = lst[0]['id'] self.api.job_binary_destroy(ctx, job_binary_id) lst = self.api.job_binary_get_all(ctx) self.assertEqual(0, len(lst)) with testtools.ExpectedException(ex.NotFoundException): self.api.job_binary_destroy(ctx, job_binary_id) def test_job_binary_fields(self): ctx = context.ctx() ctx.tenant_id = SAMPLE_JOB_BINARY['tenant_id'] job_binary_id = self.api.job_binary_create(ctx, SAMPLE_JOB_BINARY)['id'] job_binary = self.api.job_binary_get(ctx, job_binary_id) self.assertIsInstance(job_binary, dict) for key, val in SAMPLE_JOB_BINARY.items(): self.assertEqual(val, job_binary.get(key), "Key not found %s" % key) def _test_job_binary_referenced(self, reference): ctx = context.ctx() job_binary_id = self.api.job_binary_create(ctx, SAMPLE_JOB_BINARY)['id'] job_values = copy.copy(SAMPLE_JOB) job_values[reference] = [job_binary_id] job_id = self.api.job_create(ctx, job_values)['id'] # Delete while referenced, fails with testtools.ExpectedException(ex.DeletionFailed): self.api.job_binary_destroy(ctx, job_binary_id) # Delete while not referenced self.api.job_destroy(ctx, job_id) self.api.job_binary_destroy(ctx, job_binary_id) lst = self.api.job_binary_get_all(ctx) self.assertEqual(0, len(lst)) def test_job_binary_referenced_mains(self): self._test_job_binary_referenced("mains") def test_job_binary_referenced_libs(self): self._test_job_binary_referenced("libs") def test_duplicate_job_binary_create(self): ctx = context.ctx() self.api.job_binary_create(ctx, SAMPLE_JOB_BINARY) with testtools.ExpectedException(ex.DBDuplicateEntry): self.api.job_binary_create(ctx, SAMPLE_JOB_BINARY) def test_job_binary_search(self): ctx = context.ctx() jb = copy.copy(SAMPLE_JOB_BINARY) jb["name"] = "frederica" jb["url"] = "http://thebestbinary" ctx.tenant_id = jb['tenant_id'] self.api.job_binary_create(ctx, jb) lst = self.api.job_binary_get_all(ctx) self.assertEqual(1, len(lst)) kwargs = {'name': jb['name'], 'tenant_id': jb['tenant_id']} lst = self.api.job_binary_get_all(ctx, **kwargs) self.assertEqual(1, len(lst)) # Valid field but no matching value kwargs = {'name': jb['name']+"foo"} lst = 
self.api.job_binary_get_all(ctx, **kwargs) self.assertEqual(0, len(lst)) # Valid field with substrings kwargs = {'name': "red", 'url': "best"} lst = self.api.job_binary_get_all(ctx, **kwargs) self.assertEqual(0, len(lst)) # Invalid field self.assertRaises(sa_exc.InvalidRequestError, self.api.job_binary_get_all, ctx, **{'badfield': 'somevalue'}) @mock.patch('sahara.db.sqlalchemy.api.regex_filter') def test_job_binary_search_regex(self, regex_filter): # do this so we can return the correct value def _regex_filter(query, cls, regex_cols, search_opts): return query, search_opts regex_filter.side_effect = _regex_filter ctx = context.ctx() self.api.job_binary_get_all(ctx) self.assertEqual(0, regex_filter.call_count) self.api.job_binary_get_all(ctx, regex_search=True, name="fox") self.assertEqual(1, regex_filter.call_count) args, kwargs = regex_filter.call_args self.assertIs(args[1], m.JobBinary) self.assertEqual(args[2], ["name", "description", "url"]) self.assertEqual(args[3], {"name": "fox"}) def test_job_binary_update(self): ctx = context.ctx() original = self.api.job_binary_create(ctx, SAMPLE_JOB_BINARY_SWIFT) updated = self.api.job_binary_update( ctx, original["id"], SAMPLE_JOB_BINARY_SWIFT_UPDATE) # Make sure that the update did indeed succeed self.assertEqual( SAMPLE_JOB_BINARY_SWIFT_UPDATE["name"], updated["name"]) self.assertEqual(SAMPLE_JOB_BINARY_SWIFT_UPDATE["url"], updated["url"]) # Make sure we do NOT update a binary in use by a PENDING job self._create_job_execution_ref_job_binary(ctx, original["id"]) with testtools.ExpectedException(ex.UpdateFailedException): self.api.job_binary_update( ctx, original["id"], SAMPLE_JOB_BINARY_SWIFT_UPDATE) original = self.api.job_binary_create(ctx, SAMPLE_JOB_BINARY) # Make sure that internal URL update fails with testtools.ExpectedException(ex.UpdateFailedException): self.api.job_binary_update( ctx, original["id"], SAMPLE_JOB_BINARY_UPDATE) def _create_job_execution_ref_job_binary(self, ctx, jb_id): JOB_REF_BINARY = copy.copy(SAMPLE_JOB) JOB_REF_BINARY["mains"] = [jb_id] job = self.api.job_create(ctx, JOB_REF_BINARY) ds_input = self.api.data_source_create(ctx, SAMPLE_DATA_SOURCE) SAMPLE_DATA_OUTPUT = copy.copy(SAMPLE_DATA_SOURCE) SAMPLE_DATA_OUTPUT['name'] = 'output' ds_output = self.api.data_source_create(ctx, SAMPLE_DATA_OUTPUT) SAMPLE_JOB_EXECUTION['job_id'] = job['id'] SAMPLE_JOB_EXECUTION['input_id'] = ds_input['id'] SAMPLE_JOB_EXECUTION['output_id'] = ds_output['id'] self.api.job_execution_create(ctx, SAMPLE_JOB_EXECUTION) lst = self.api.job_execution_get_all(ctx) job_ex_id = lst[0]["id"] new_info = {"status": edp.JOB_STATUS_PENDING} self.api.job_execution_update(ctx, job_ex_id, {"info": new_info}) def test_jb_update_delete_when_protected(self): ctx = context.ctx() sample = copy.deepcopy(SAMPLE_JOB_BINARY) sample['is_protected'] = True jb = self.api.job_binary_create(ctx, sample) jb_id = jb["id"] with testtools.ExpectedException(ex.UpdateFailedException): try: self.api.job_binary_update(ctx, jb_id, {"name": "jb"}) except ex.UpdateFailedException as e: self.assert_protected_resource_exception(e) raise e with testtools.ExpectedException(ex.DeletionFailed): try: self.api.job_binary_destroy(ctx, jb_id) except ex.DeletionFailed as e: self.assert_protected_resource_exception(e) raise e self.api.job_binary_update(ctx, jb_id, {"name": "jb", "is_protected": False}) def test_public_jb_update_delete_from_another_tenant(self): ctx = context.ctx() sample = copy.deepcopy(SAMPLE_JOB_BINARY) sample['is_public'] = True jb = self.api.job_binary_create(ctx, 
sample) jb_id = jb["id"] ctx.tenant_id = 'tenant_2' with testtools.ExpectedException(ex.UpdateFailedException): try: self.api.job_binary_update(ctx, jb_id, {"name": "jb"}) except ex.UpdateFailedException as e: self.assert_created_in_another_tenant_exception(e) raise e with testtools.ExpectedException(ex.DeletionFailed): try: self.api.job_binary_destroy(ctx, jb_id) except ex.DeletionFailed as e: self.assert_created_in_another_tenant_exception(e) raise e ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/conductor/manager/test_edp_interface.py0000664000175000017500000000703500000000000026260 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from sahara import context import sahara.tests.unit.conductor.base as test_base from sahara.tests.unit.conductor.manager import test_edp def _merge_dict(original, update): new = copy.deepcopy(original) new.update(update) return new SAMPLE_JOB = _merge_dict(test_edp.SAMPLE_JOB, { "interface": [ { "name": "Reducer Count", "mapping_type": "configs", "location": "mapred.reduce.tasks", "value_type": "number", "required": True, "default": "1" }, { "name": "Input Path", "mapping_type": "params", "location": "INPUT", "value_type": "data_source", "required": False, "default": "hdfs://path" }, { "name": "Positional Argument 2", "mapping_type": "args", "location": "1", "value_type": "string", "required": False, "default": "default" }, { "name": "Positional Argument 1", "mapping_type": "args", "location": "0", "value_type": "string", "required": False, "default": "arg_1" }, ] }) SAMPLE_JOB_EXECUTION = _merge_dict(test_edp.SAMPLE_JOB, { "interface": { "Reducer Count": "2", "Positional Argument 2": "arg_2" }, "job_configs": {"args": ["arg_3"], "configs": {"mapred.map.tasks": "3"}} }) class JobExecutionTest(test_base.ConductorManagerTestCase): def test_interface_flows(self): ctx = context.ctx() job = self.api.job_create(ctx, SAMPLE_JOB) arg_names = [arg['name'] for arg in job['interface']] self.assertEqual(arg_names, ["Reducer Count", "Input Path", "Positional Argument 2", "Positional Argument 1"]) job_ex_input = copy.deepcopy(SAMPLE_JOB_EXECUTION) job_ex_input['job_id'] = job['id'] self.api.job_execution_create(ctx, job_ex_input) lst = self.api.job_execution_get_all(ctx) self.assertEqual(1, len(lst)) job_ex_result = lst[0] configs = { 'configs': {'mapred.reduce.tasks': '2', 'mapred.map.tasks': '3'}, 'args': ['arg_1', 'arg_2', 'arg_3'], 'params': {'INPUT': 'hdfs://path'} } self.assertEqual(configs, job_ex_result['job_configs']) self.api.job_execution_destroy(ctx, job_ex_result['id']) del job_ex_input['job_configs'] self.api.job_execution_create(ctx, job_ex_input) lst = self.api.job_execution_get_all(ctx) self.assertEqual(1, len(lst)) job_ex_result = lst[0] configs = { 'configs': {'mapred.reduce.tasks': '2'}, 'args': ['arg_1', 'arg_2'], 'params': {'INPUT': 'hdfs://path'} } self.assertEqual(configs, 
job_ex_result['job_configs']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/conductor/manager/test_from_template.py0000664000175000017500000000756100000000000026332 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from sahara.conductor import manager from sahara import context import sahara.tests.unit.conductor.base as test_base from sahara.tests.unit.conductor.manager import test_clusters from sahara.tests.unit.conductor.manager import test_templates CORRECT_CONF = { 'service_1': {'config_2': 'value_2', 'config_1': 'value_1'}, 'service_2': {'config_1': 'value_1'} } class ObjectsFromTemplatesTest(test_base.ConductorManagerTestCase): def __init__(self, *args, **kwargs): super(ObjectsFromTemplatesTest, self).__init__( checks=[ lambda: CORRECT_CONF, lambda: test_clusters.SAMPLE_CLUSTER, lambda: test_templates.SAMPLE_CLT, lambda: test_templates.SAMPLE_NGT, lambda: manager.CLUSTER_DEFAULTS, lambda: manager.NODE_GROUP_DEFAULTS, lambda: manager.INSTANCE_DEFAULTS, ], *args, **kwargs) def test_cluster_create_from_templates(self): ctx = context.ctx() # create node_group_template ng_tmpl = copy.deepcopy(test_templates.SAMPLE_NGT) ng_tmpl['volumes_size'] = 10 ng_tmpl['node_configs']['service_1']['config_2'] = 'value_2' ng_tmpl = self.api.node_group_template_create(ctx, ng_tmpl) # create cluster template cl_tmpl = self.api.cluster_template_create(ctx, test_templates.SAMPLE_CLT) # create cluster cluster_val = copy.deepcopy(test_clusters.SAMPLE_CLUSTER) cluster_val['cluster_template_id'] = cl_tmpl['id'] cluster_val['node_groups'][0]['node_group_template_id'] = ng_tmpl['id'] cluster = self.api.cluster_create(ctx, cluster_val) self.assertEqual(CORRECT_CONF, cluster['cluster_configs']) for node_group in cluster['node_groups']: if node_group['name'] == 'ng_1': self.assertEqual(['p1', 'p2'], node_group['node_processes']) self.assertEqual(10, node_group['volumes_size']) self.assertEqual(CORRECT_CONF, node_group['node_configs']) def test_node_group_add_from_template(self): ctx = context.ctx() # create cluster sample_copy = copy.deepcopy(test_clusters.SAMPLE_CLUSTER) cluster = self.api.cluster_create(ctx, sample_copy) # create node_group_template ng_tmpl = copy.deepcopy(test_templates.SAMPLE_NGT) ng_tmpl['volumes_size'] = 10 ng_tmpl['node_configs']['service_1']['config_2'] = 'value_2' ng_tmpl = self.api.node_group_template_create(ctx, ng_tmpl) # add node group to cluster ng = copy.deepcopy(test_clusters.SAMPLE_CLUSTER['node_groups'][0]) ng['node_group_template_id'] = ng_tmpl['id'] ng['count'] = 5 ng['name'] = 'ng_3' self.api.node_group_add(ctx, cluster['id'], ng) # refetch cluster cluster = self.api.cluster_get(ctx, cluster['id']) for node_group in cluster['node_groups']: if node_group['name'] == 'ng_3': self.assertEqual(['p1', 'p2'], node_group['node_processes']) self.assertEqual(10, node_group['volumes_size']) self.assertEqual(CORRECT_CONF, 
node_group['node_configs']) self.assertEqual(5, node_group['count']) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/conductor/manager/test_templates.py0000664000175000017500000007230300000000000025466 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from unittest import mock from oslo_utils import uuidutils import six from sqlalchemy import exc as sa_ex import testtools from sahara.conductor import manager from sahara import context from sahara.db.sqlalchemy import models as m from sahara import exceptions as ex from sahara.service.validations import cluster_template_schema as cl_schema from sahara.service.validations import node_group_template_schema as ngt_schema import sahara.tests.unit.conductor.base as test_base import sahara.tests.unit.conductor.manager.test_clusters as cluster_tests SAMPLE_NGT = { "name": "ngt_test", "flavor_id": "42", "plugin_name": "test_plugin", "hadoop_version": "test_version", "node_processes": ["p1", "p2"], "image_id": uuidutils.generate_uuid(), "node_configs": { "service_1": { "config_1": "value_1" }, "service_2": { "config_1": "value_1" } }, "volumes_per_node": 1, "volumes_size": 1, "volume_type": "big", "volumes_availability_zone": "here", "volume_mount_prefix": "/tmp", "description": "my template", "floating_ip_pool": "public", "security_groups": ["cat", "dog"], "auto_security_group": False, "availability_zone": "here", "is_proxy_gateway": False, "volume_local_to_instance": False, 'use_autoconfig': True, "is_public": False, "is_protected": False } SAMPLE_CLT = { "name": "clt_test", "plugin_name": "test_plugin", "hadoop_version": "test_version", "default_image_id": uuidutils.generate_uuid(), "cluster_configs": { "service_1": { "config_1": "value_1" }, "service_2": { "config_1": "value_1" } }, "node_groups": [ { "name": "ng_1", "flavor_id": "42", "node_processes": ["p1", "p2"], "count": 1, "floating_ip_pool": None, "security_groups": None, "availability_zone": None, 'use_autoconfig': True, "shares": None }, { "name": "ng_2", "flavor_id": "42", "node_processes": ["p3", "p4"], "count": 3, "floating_ip_pool": None, "security_groups": ["group1", "group2"], "availability_zone": None, 'use_autoconfig': True, "shares": None } ], "anti_affinity": ["datanode"], "description": "my template", "neutron_management_network": uuidutils.generate_uuid(), "shares": None, "is_public": False, "is_protected": False } class NodeGroupTemplates(test_base.ConductorManagerTestCase): def __init__(self, *args, **kwargs): super(NodeGroupTemplates, self).__init__( checks=[ lambda: SAMPLE_CLT, lambda: SAMPLE_NGT, lambda: manager.CLUSTER_DEFAULTS, lambda: manager.NODE_GROUP_DEFAULTS, lambda: manager.INSTANCE_DEFAULTS, ], *args, **kwargs) def test_minimal_ngt_create_list_delete(self): ctx = context.ctx() self.api.node_group_template_create(ctx, SAMPLE_NGT) lst = self.api.node_group_template_get_all(ctx) self.assertEqual(1, len(lst)) 
ngt_id = lst[0]['id'] self.api.node_group_template_destroy(ctx, ngt_id) lst = self.api.node_group_template_get_all(ctx) self.assertEqual(0, len(lst)) def test_duplicate_ngt_create(self): ctx = context.ctx() self.api.node_group_template_create(ctx, SAMPLE_NGT) with testtools.ExpectedException(ex.DBDuplicateEntry): self.api.node_group_template_create(ctx, SAMPLE_NGT) def test_ngt_fields(self): ctx = context.ctx() ngt_db_obj_id = self.api.node_group_template_create( ctx, SAMPLE_NGT)['id'] ngt_db_obj = self.api.node_group_template_get(ctx, ngt_db_obj_id) self.assertIsInstance(ngt_db_obj, dict) for key, val in SAMPLE_NGT.items(): self.assertEqual(val, ngt_db_obj.get(key), "Key not found %s" % key) def test_ngt_delete(self): ctx = context.ctx() db_obj_ngt = self.api.node_group_template_create(ctx, SAMPLE_NGT) _id = db_obj_ngt['id'] self.api.node_group_template_destroy(ctx, _id) with testtools.ExpectedException(ex.NotFoundException): self.api.node_group_template_destroy(ctx, _id) def test_ngt_delete_default(self): ctx = context.ctx() vals = copy.copy(SAMPLE_NGT) vals["name"] = "protected" vals["is_protected"] = True ngt_prot = self.api.node_group_template_create(ctx, vals) ngt_prot_id = ngt_prot['id'] vals["name"] = "protected_default" vals["is_protected"] = True vals["is_default"] = True ngt_prot_def = self.api.node_group_template_create(ctx, vals) ngt_prot_def_id = ngt_prot_def['id'] # We should not be able to delete ngt_prot until we remove # the protected flag, even if we pass ignore_prot_on_def with testtools.ExpectedException(ex.DeletionFailed): self.api.node_group_template_destroy(ctx, ngt_prot_id) with testtools.ExpectedException(ex.DeletionFailed): self.api.node_group_template_destroy(ctx, ngt_prot_id, ignore_prot_on_def=True) update_values = {"is_protected": False} self.api.node_group_template_update(ctx, ngt_prot_id, update_values) self.api.node_group_template_destroy(ctx, ngt_prot_id) with testtools.ExpectedException(ex.NotFoundException): self.api.node_group_template_destroy(ctx, ngt_prot_id) # However, for the protected_default we should be able to # override the protected check by passing ignore_prot_on_def with testtools.ExpectedException(ex.DeletionFailed): self.api.node_group_template_destroy(ctx, ngt_prot_def_id) self.api.node_group_template_destroy(ctx, ngt_prot_def_id, ignore_prot_on_def=True) with testtools.ExpectedException(ex.NotFoundException): self.api.node_group_template_destroy(ctx, ngt_prot_def_id) def test_ngt_search(self): ctx = context.ctx() ngt = copy.deepcopy(SAMPLE_NGT) ngt["name"] = "frederica" ngt["plugin_name"] = "test plugin" self.api.node_group_template_create(ctx, ngt) lst = self.api.node_group_template_get_all(ctx) self.assertEqual(1, len(lst)) # Exact match kwargs = {'name': ngt['name'], 'plugin_name': ngt['plugin_name']} lst = self.api.node_group_template_get_all(ctx, **kwargs) self.assertEqual(1, len(lst)) # Valid field but no matching value kwargs = {'name': ngt['name']+"foo"} lst = self.api.node_group_template_get_all(ctx, **kwargs) self.assertEqual(0, len(lst)) # Valid field with substrings kwargs = {'name': "red", 'plugin_name': "test"} lst = self.api.node_group_template_get_all(ctx, **kwargs) self.assertEqual(0, len(lst)) # Invalid field self.assertRaises(sa_ex.InvalidRequestError, self.api.node_group_template_get_all, ctx, **{'badfield': 'junk'}) @mock.patch('sahara.db.sqlalchemy.api.regex_filter') def test_ngt_search_regex(self, regex_filter): # do this so we can return the correct value def _regex_filter(query, cls, regex_cols, search_opts): 
return query, search_opts regex_filter.side_effect = _regex_filter ctx = context.ctx() self.api.node_group_template_get_all(ctx) self.assertEqual(0, regex_filter.call_count) self.api.node_group_template_get_all(ctx, regex_search=True, name="fox") self.assertEqual(1, regex_filter.call_count) args, kwargs = regex_filter.call_args self.assertIs(args[1], m.NodeGroupTemplate) self.assertEqual(args[2], ["name", "description", "plugin_name", "tenant_id"]) self.assertEqual(args[3], {"name": "fox"}) def test_ngt_update(self): ctx = context.ctx() ngt = self.api.node_group_template_create(ctx, SAMPLE_NGT) ngt_id = ngt["id"] UPDATE_NAME = "UpdatedSampleNGTName" update_values = {"name": UPDATE_NAME} updated_ngt = self.api.node_group_template_update(ctx, ngt_id, update_values) self.assertEqual(UPDATE_NAME, updated_ngt["name"]) updated_ngt = self.api.node_group_template_get(ctx, ngt_id) self.assertEqual(UPDATE_NAME, updated_ngt["name"]) with testtools.ExpectedException(ex.NotFoundException): self.api.node_group_template_update(ctx, -1, update_values) ngt = self.api.node_group_template_create(ctx, SAMPLE_NGT) ngt_id = ngt['id'] with testtools.ExpectedException(ex.DBDuplicateEntry): self.api.node_group_template_update(ctx, ngt_id, update_values) def test_ngt_update_default(self): ctx = context.ctx() vals = copy.copy(SAMPLE_NGT) vals["name"] = "protected" vals["is_protected"] = True ngt_prot = self.api.node_group_template_create(ctx, vals) ngt_prot_id = ngt_prot["id"] vals["name"] = "protected_default" vals["is_protected"] = True vals["is_default"] = True ngt_prot_def = self.api.node_group_template_create(ctx, vals) ngt_prot_def_id = ngt_prot_def["id"] # We should not be able to update ngt_prot until we remove # the is_protected flag, even if we pass ignore_prot_on_def UPDATE_NAME = "UpdatedSampleNGTName" update_values = {"name": UPDATE_NAME} with testtools.ExpectedException(ex.UpdateFailedException): self.api.node_group_template_update(ctx, ngt_prot_id, update_values) with testtools.ExpectedException(ex.UpdateFailedException): self.api.node_group_template_update(ctx, ngt_prot_id, update_values, ignore_prot_on_def=True) update_values["is_protected"] = False updated_ngt = self.api.node_group_template_update(ctx, ngt_prot_id, update_values) self.assertEqual(UPDATE_NAME, updated_ngt["name"]) # However, for the ngt_prot_def we should be able to # override the is_protected check by passing ignore_prot_on_def update_values = {"name": UPDATE_NAME+"default"} with testtools.ExpectedException(ex.UpdateFailedException): self.api.node_group_template_update(ctx, ngt_prot_def_id, update_values) updated_ngt = self.api.node_group_template_update( ctx, ngt_prot_def_id, update_values, ignore_prot_on_def=True) self.assertEqual(UPDATE_NAME+"default", updated_ngt["name"]) self.assertTrue(updated_ngt["is_protected"]) self.assertTrue(updated_ngt["is_default"]) def test_ngt_update_with_nulls(self): ctx = context.ctx() ngt = self.api.node_group_template_create(ctx, SAMPLE_NGT) ngt_id = ngt["id"] updated_values = copy.deepcopy(SAMPLE_NGT) for prop, value in six.iteritems( ngt_schema.NODE_GROUP_TEMPLATE_SCHEMA["properties"]): if type(value["type"]) is list and "null" in value["type"]: updated_values[prop] = None # Prove that we can call update on these fields with null values # without an exception self.api.node_group_template_update(ctx, ngt_id, updated_values) updated_ngt = self.api.node_group_template_get(ctx, ngt_id) for prop, value in six.iteritems(updated_values): if value is None: self.assertIsNone(updated_ngt[prop]) def 
test_ngt_update_delete_when_protected(self): ctx = context.ctx() sample = copy.deepcopy(SAMPLE_NGT) sample['is_protected'] = True ngt = self.api.node_group_template_create(ctx, sample) ngt_id = ngt["id"] with testtools.ExpectedException(ex.UpdateFailedException): self.api.node_group_template_update(ctx, ngt_id, {"name": "tmpl"}) with testtools.ExpectedException(ex.DeletionFailed): self.api.node_group_template_destroy(ctx, ngt_id) self.api.node_group_template_update(ctx, ngt_id, {"name": "tmpl", "is_protected": False}) def test_public_ngt_update_from_another_tenant(self): ctx = context.ctx() sample = copy.deepcopy(SAMPLE_NGT) sample['is_public'] = True ngt = self.api.node_group_template_create(ctx, sample) ngt_id = ngt["id"] ctx.tenant_id = 'tenant_2' with testtools.ExpectedException(ex.UpdateFailedException): self.api.node_group_template_update(ctx, ngt_id, {"name": "tmpl"}) def test_public_ngt_delete_from_another_tenant(self): ctx = context.ctx() sample = copy.deepcopy(SAMPLE_NGT) sample['is_public'] = True ngt = self.api.node_group_template_create(ctx, sample) ngt_id = ngt["id"] ctx.tenant_id = 'tenant_2' with testtools.ExpectedException(ex.DeletionFailed): self.api.node_group_template_destroy(ctx, ngt_id) class ClusterTemplates(test_base.ConductorManagerTestCase): def __init__(self, *args, **kwargs): super(ClusterTemplates, self).__init__( checks=[ lambda: SAMPLE_CLT, lambda: SAMPLE_NGT, lambda: manager.CLUSTER_DEFAULTS, lambda: manager.NODE_GROUP_DEFAULTS, lambda: manager.INSTANCE_DEFAULTS, ], *args, **kwargs) def test_minimal_clt_create_list_delete(self): ctx = context.ctx() self.api.cluster_template_create(ctx, SAMPLE_CLT) lst = self.api.cluster_template_get_all(ctx) self.assertEqual(1, len(lst)) clt_id = lst[0]['id'] self.api.cluster_template_destroy(ctx, clt_id) lst = self.api.cluster_template_get_all(ctx) self.assertEqual(0, len(lst)) with testtools.ExpectedException(ex.NotFoundException): self.api.cluster_template_destroy(ctx, clt_id) def test_duplicate_clt_create(self): ctx = context.ctx() self.api.cluster_template_create(ctx, SAMPLE_CLT) with testtools.ExpectedException(ex.DBDuplicateEntry): self.api.cluster_template_create(ctx, SAMPLE_CLT) def test_clt_fields(self): ctx = context.ctx() clt_db_obj_id = self.api.cluster_template_create(ctx, SAMPLE_CLT)['id'] clt_db_obj = self.api.cluster_template_get(ctx, clt_db_obj_id) self.assertIsInstance(clt_db_obj, dict) for key, val in SAMPLE_CLT.items(): if key == 'node_groups': # this will be checked separately continue self.assertEqual(val, clt_db_obj.get(key), "Key not found %s" % key) for ng in clt_db_obj["node_groups"]: ng.pop("created_at") ng.pop("updated_at") ng.pop("id") ng.pop("tenant_id") self.assertEqual(clt_db_obj_id, ng.pop("cluster_template_id")) ng.pop("image_id") ng.pop("node_configs") ng.pop("node_group_template_id") ng.pop("volume_mount_prefix") ng.pop("volumes_size") ng.pop("volumes_per_node") ng.pop("volumes_availability_zone") ng.pop("volume_type") ng.pop("auto_security_group") ng.pop("is_proxy_gateway") ng.pop("boot_from_volume") ng.pop("boot_volume_type") ng.pop("boot_volume_availability_zone") ng.pop("boot_volume_local_to_instance") ng.pop('volume_local_to_instance') self.assertEqual(SAMPLE_CLT["node_groups"], clt_db_obj["node_groups"]) def test_clt_delete(self): ctx = context.ctx() db_obj_clt = self.api.cluster_template_create(ctx, SAMPLE_CLT) _id = db_obj_clt['id'] self.api.cluster_template_destroy(ctx, _id) with testtools.ExpectedException(ex.NotFoundException): self.api.cluster_template_destroy(ctx, _id) def 
test_clt_delete_default(self): ctx = context.ctx() vals = copy.copy(SAMPLE_CLT) vals["name"] = "protected" vals["is_protected"] = True clt_prot = self.api.cluster_template_create(ctx, vals) clt_prot_id = clt_prot['id'] vals["name"] = "protected_default" vals["is_protected"] = True vals["is_default"] = True clt_prot_def = self.api.cluster_template_create(ctx, vals) clt_prot_def_id = clt_prot_def['id'] # We should not be able to delete clt_prot until we remove # the is_protected flag, even if we pass ignore_prot_on_def with testtools.ExpectedException(ex.DeletionFailed): self.api.cluster_template_destroy(ctx, clt_prot_id) with testtools.ExpectedException(ex.DeletionFailed): self.api.cluster_template_destroy(ctx, clt_prot_id, ignore_prot_on_def=True) update_values = {"is_protected": False} self.api.cluster_template_update(ctx, clt_prot_id, update_values) self.api.cluster_template_destroy(ctx, clt_prot_id) with testtools.ExpectedException(ex.NotFoundException): self.api.cluster_template_destroy(ctx, clt_prot_id) # However, for clt_prot_def we should be able to override # the is_protected check by passing ignore_prot_on_def with testtools.ExpectedException(ex.DeletionFailed): self.api.cluster_template_destroy(ctx, clt_prot_def_id) self.api.cluster_template_destroy(ctx, clt_prot_def_id, ignore_prot_on_def=True) with testtools.ExpectedException(ex.NotFoundException): self.api.cluster_template_destroy(ctx, clt_prot_def_id) def test_clt_search(self): ctx = context.ctx() clt = copy.deepcopy(SAMPLE_CLT) clt["name"] = "frederica" clt["plugin_name"] = "test_plugin" self.api.cluster_template_create(ctx, clt) lst = self.api.cluster_template_get_all(ctx) self.assertEqual(1, len(lst)) # Exact match kwargs = {'name': clt['name'], 'plugin_name': clt['plugin_name']} lst = self.api.cluster_template_get_all(ctx, **kwargs) self.assertEqual(1, len(lst)) # Valid field but no matching value kwargs = {'name': clt['name']+"foo"} lst = self.api.cluster_template_get_all(ctx, **kwargs) self.assertEqual(0, len(lst)) # Valid field with substrings kwargs = {'name': "red", 'plugin_name': "test"} lst = self.api.cluster_template_get_all(ctx, **kwargs) self.assertEqual(0, len(lst)) # Invalid field self.assertRaises(sa_ex.InvalidRequestError, self.api.cluster_template_get_all, ctx, **{'badfield': 'junk'}) @mock.patch('sahara.db.sqlalchemy.api.regex_filter') def test_clt_search_regex(self, regex_filter): # do this so we can return the correct value def _regex_filter(query, cls, regex_cols, search_opts): return query, search_opts regex_filter.side_effect = _regex_filter ctx = context.ctx() self.api.cluster_template_get_all(ctx) self.assertEqual(0, regex_filter.call_count) self.api.cluster_template_get_all(ctx, regex_search=True, name="fox") self.assertEqual(1, regex_filter.call_count) args, kwargs = regex_filter.call_args self.assertIs(args[1], m.ClusterTemplate) self.assertEqual(args[2], ["name", "description", "plugin_name", "tenant_id"]) self.assertEqual(args[3], {"name": "fox"}) def test_clt_update(self): ctx = context.ctx() clt = self.api.cluster_template_create(ctx, SAMPLE_CLT) clt_id = clt["id"] UPDATE_NAME = "UpdatedClusterTemplate" update_values = {"name": UPDATE_NAME} updated_clt = self.api.cluster_template_update(ctx, clt_id, update_values) self.assertEqual(UPDATE_NAME, updated_clt["name"]) updated_clt = self.api.cluster_template_get(ctx, clt_id) self.assertEqual(UPDATE_NAME, updated_clt["name"]) self.assertEqual(clt["node_groups"], updated_clt["node_groups"]) # check duplicate name handling clt = 
self.api.cluster_template_create(ctx, SAMPLE_CLT) clt_id = clt["id"] with testtools.ExpectedException(ex.DBDuplicateEntry): self.api.cluster_template_update(ctx, clt_id, update_values) with testtools.ExpectedException(ex.NotFoundException): self.api.cluster_template_update(ctx, -1, update_values) # create a cluster and try updating the referenced cluster template cluster_val = copy.deepcopy(cluster_tests.SAMPLE_CLUSTER) cluster_val['name'] = "ClusterTemplateUpdateTestCluster" cluster_val['cluster_template_id'] = clt['id'] self.api.cluster_create(ctx, cluster_val) update_values = {"name": "noUpdateInUseName"} with testtools.ExpectedException(ex.UpdateFailedException): self.api.cluster_template_update(ctx, clt['id'], update_values) def test_clt_update_default(self): ctx = context.ctx() vals = copy.copy(SAMPLE_CLT) vals["name"] = "protected" vals["is_protected"] = True clt_prot = self.api.cluster_template_create(ctx, vals) clt_prot_id = clt_prot["id"] vals["name"] = "protected_default" vals["is_protected"] = True vals["is_default"] = True clt_prot_def = self.api.cluster_template_create(ctx, vals) clt_prot_def_id = clt_prot_def["id"] # We should not be able to update clt_prot until we remove # the is_protected flag, even if we pass ignore_prot_on_def UPDATE_NAME = "UpdatedClusterTemplate" update_values = {"name": UPDATE_NAME} with testtools.ExpectedException(ex.UpdateFailedException): self.api.cluster_template_update(ctx, clt_prot_id, update_values) with testtools.ExpectedException(ex.UpdateFailedException): self.api.cluster_template_update(ctx, clt_prot_id, update_values, ignore_prot_on_def=True) update_values["is_protected"] = False updated_clt = self.api.cluster_template_update(ctx, clt_prot_id, update_values) self.assertEqual(UPDATE_NAME, updated_clt["name"]) # However, for the clt_prot_def we should be able to # override the is_protected check by passing ignore_prot_on_def update_values = {"name": UPDATE_NAME+"default"} with testtools.ExpectedException(ex.UpdateFailedException): self.api.cluster_template_update(ctx, clt_prot_def_id, update_values) updated_clt = self.api.cluster_template_update(ctx, clt_prot_def_id, update_values, ignore_prot_on_def=True) self.assertEqual(UPDATE_NAME+"default", updated_clt["name"]) self.assertTrue(updated_clt["is_default"]) self.assertTrue(updated_clt["is_protected"]) def test_clt_update_with_nulls(self): ctx = context.ctx() clt = self.api.cluster_template_create(ctx, SAMPLE_CLT) clt_id = clt["id"] updated_values = copy.deepcopy(SAMPLE_CLT) for prop, value in six.iteritems( cl_schema.CLUSTER_TEMPLATE_SCHEMA["properties"]): if type(value["type"]) is list and "null" in value["type"]: updated_values[prop] = None # Prove that we can call update on these fields with null values # without an exception self.api.cluster_template_update(ctx, clt_id, updated_values) updated_clt = self.api.cluster_template_get(ctx, clt_id) for prop, value in six.iteritems(updated_values): if value is None: # Conductor populates node groups with [] when # the value given is null if prop == "node_groups": self.assertEqual([], updated_clt[prop]) else: self.assertIsNone(updated_clt[prop]) def test_clt_update_delete_when_protected(self): ctx = context.ctx() sample = copy.deepcopy(SAMPLE_CLT) sample['is_protected'] = True clt = self.api.cluster_template_create(ctx, sample) clt_id = clt["id"] with testtools.ExpectedException(ex.UpdateFailedException): try: self.api.cluster_template_update(ctx, clt_id, {"name": "tmpl"}) except ex.UpdateFailedException as e: 
self.assert_protected_resource_exception(e) raise with testtools.ExpectedException(ex.DeletionFailed): try: self.api.cluster_template_destroy(ctx, clt_id) except ex.DeletionFailed as e: self.assert_protected_resource_exception(e) raise self.api.cluster_template_update(ctx, clt_id, {"name": "tmpl", "is_protected": False}) def test_public_clt_update_delete_from_another_tenant(self): ctx = context.ctx() sample = copy.deepcopy(SAMPLE_CLT) sample['is_public'] = True clt = self.api.cluster_template_create(ctx, sample) clt_id = clt["id"] ctx.tenant_id = 'tenant_2' with testtools.ExpectedException(ex.UpdateFailedException): try: self.api.cluster_template_update(ctx, clt_id, {"name": "tmpl"}) except ex.UpdateFailedException as e: self.assert_created_in_another_tenant_exception(e) raise with testtools.ExpectedException(ex.DeletionFailed): try: self.api.cluster_template_destroy(ctx, clt_id) except ex.DeletionFailed as e: self.assert_created_in_another_tenant_exception(e) raise def test_update_clt_on_ngt_update(self): # Prove that cluster templates get updated with proper values # after a referenced node group template is updated ctx = context.ctx() ngt = self.api.node_group_template_create(ctx, SAMPLE_NGT) sample = copy.deepcopy(SAMPLE_CLT) sample["node_groups"] = [ {"node_group_template_id": ngt['id'], "count": 1} ] ct = self.api.cluster_template_create(ctx, sample) UPDATE_FLAVOR = "41" update_values = {"flavor_id": UPDATE_FLAVOR} self.api.node_group_template_update(ctx, ngt["id"], update_values) updated_ct = self.api.cluster_template_get(ctx, ct["id"]) self.assertEqual(UPDATE_FLAVOR, updated_ct["node_groups"][0]["flavor_id"]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/conductor/test_api.py0000664000175000017500000001775300000000000022637 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
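# An illustrative sketch of the conductor API flow exercised by the tests
# below; it reuses only names and calls that already appear in this module
# (context.ctx(), conductor.API, SAMPLE_CLUSTER) and is not itself a test:
#
#     ctx = context.ctx()
#     cluster = conductor.API.cluster_create(ctx, SAMPLE_CLUSTER)
#     conductor.API.cluster_update(ctx, cluster.id, {'name': 'changed'})
#     updated = conductor.API.cluster_get(ctx, cluster.id)   # name == 'changed'
#     conductor.API.cluster_destroy(ctx, cluster.id)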
import copy

import testtools

from sahara import conductor
from sahara import context
from sahara import exceptions
from sahara.tests.unit import base
from sahara.utils import general as gu


SAMPLE_CLUSTER = {
    'plugin_name': 'test_plugin',
    'hadoop_version': 'test_version',
    'tenant_id': 'tenant_1',
    'name': 'test_cluster',
    'user_keypair_id': 'my_keypair',
    'node_groups': [
        {
            'name': 'ng_1',
            'flavor_id': '42',
            'node_processes': ['p1', 'p2'],
            'count': 1
        },
        {
            'name': 'ng_2',
            'flavor_id': '42',
            'node_processes': ['p3', 'p4'],
            'count': 3
        }
    ],
    'cluster_configs': {
        'service_1': {
            'config_2': 'value_2'
        },
        'service_2': {
            'config_1': 'value_1'
        }
    },
}

SAMPLE_NODE_GROUP = {
    'name': 'ng_3',
    'flavor_id': '42',
    'node_processes': ['p5', 'p6'],
    'count': 5
}

SAMPLE_INSTANCE = {
    'instance_name': 'test-name',
    'instance_id': '123456',
    'management_ip': '1.2.3.1'
}

SAMPLE_JOB = {
    "tenant_id": "test_tenant",
    "name": "job_test",
    "description": "test_desc",
    "type": "pig"
}

SAMPLE_JOB_BINARY = {
    "tenant_id": "test_tenant",
    "name": "job_binary_test",
    "description": "test_dec",
    "url": "internal-db://test_binary",
}


class TestConductorApi(base.SaharaWithDbTestCase):
    def setUp(self):
        super(TestConductorApi, self).setUp()
        self.api = conductor.API

    def _make_sample(self):
        ctx = context.ctx()
        cluster = self.api.cluster_create(ctx, SAMPLE_CLUSTER)
        return ctx, cluster

    def test_update_by_id(self):
        ctx, cluster = self._make_sample()

        self.api.cluster_update(ctx, cluster.id, {'name': 'changed'})

        updated_cluster = self.api.cluster_get(ctx, cluster.id)
        self.assertEqual('changed', updated_cluster['name'])

        self.api.cluster_destroy(ctx, updated_cluster.id)
        cluster_list = self.api.cluster_get_all(ctx)
        self.assertEqual(0, len(cluster_list))

    def test_add_node_group_to_cluster_id(self):
        ctx, cluster = self._make_sample()
        ng_id = self.api.node_group_add(ctx, cluster.id, SAMPLE_NODE_GROUP)
        self.assertTrue(ng_id)

    def test_update_node_group_by_id(self):
        ctx, cluster = self._make_sample()
        ng_id = cluster.node_groups[0].id
        self.api.node_group_update(ctx, ng_id, {'name': 'changed_ng'})

        cluster = self.api.cluster_get(ctx, cluster.id)
        ng = gu.get_by_id(cluster.node_groups, ng_id)
        self.assertEqual('changed_ng', ng.name)

    def test_remove_node_group(self):
        ctx, cluster = self._make_sample()
        ng = cluster.node_groups[0]
        self.api.node_group_remove(ctx, ng)

        cluster = self.api.cluster_get(ctx, cluster.id)
        self.assertNotIn(ng, cluster.node_groups)

    def test_add_instance_to_node_group_id(self):
        ctx, cluster = self._make_sample()
        inst_id = self.api.instance_add(ctx, cluster.node_groups[0].id,
                                        SAMPLE_INSTANCE)
        self.assertTrue(inst_id)

    def test_update_instance_by_id(self):
        ctx, cluster = self._make_sample()
        ng_id = cluster.node_groups[0].id
        inst_id = self.api.instance_add(ctx, ng_id, SAMPLE_INSTANCE)

        self.api.instance_update(ctx, inst_id, {'instance_name': 'tst123'})

        cluster = self.api.cluster_get(ctx, cluster.id)
        ng = gu.get_by_id(cluster.node_groups, ng_id)
        self.assertEqual('tst123', ng.instances[0].instance_name)

    def test_instance_volume_ops(self):
        ctx, cluster = self._make_sample()
        ng_id = cluster.node_groups[0].id
        inst_id = self.api.instance_add(ctx, ng_id, SAMPLE_INSTANCE)

        self.api.append_volume(ctx, inst_id, 0)
        self.api.append_volume(ctx, inst_id, 1)

        cluster = self.api.cluster_get(ctx, cluster.id)
        ng = gu.get_by_id(cluster.node_groups, ng_id)
        self.assertEqual(2, len(gu.get_by_id(ng.instances, inst_id).volumes))

        self.api.remove_volume(ctx, inst_id, 0)

        cluster = self.api.cluster_get(ctx, cluster.id)
        ng = gu.get_by_id(cluster.node_groups, ng_id)
self.assertEqual(1, len(gu.get_by_id(ng.instances, inst_id).volumes)) def _get_events(self, ctx, cluster_id, step_id=None): cluster = self.api.cluster_get(ctx, cluster_id, show_progress=True) events = [] for step in cluster.provision_progress: if step_id == step['id']: return step['events'] else: events += step['events'] if step_id: return events else: return [] def test_events_ops(self): ctx, cluster = self._make_sample() st_name = "some_name" st_type = "some_type" st_info = "some_info" # test provision step creation step_id = self.api.cluster_provision_step_add(ctx, cluster.id, { 'step_name': st_name, 'step_type': st_type, }) ncluster = self.api.cluster_get(ctx, cluster.id) self.assertEqual(1, len(ncluster['provision_progress'])) provision_step = ncluster['provision_progress'][0] self.assertEqual(st_name, provision_step['step_name']) self.assertEqual(st_type, provision_step['step_type']) self.assertEqual(cluster.id, provision_step['cluster_id']) # test adding event to step and getting events from step self.api.cluster_event_add(ctx, step_id, { 'node_group_id': 'node_group_id', 'instance_id': 'instance_id', 'instance_name': st_name, 'event_info': st_info, 'successful': True }) events = self._get_events(ctx, cluster.id, step_id) self.assertEqual(1, len(events)) self.assertEqual(st_name, events[0].instance_name) self.assertTrue(events[0].successful) self.assertEqual(st_info, events[0].event_info) self.api.cluster_destroy(ctx, cluster.id) with testtools.ExpectedException(exceptions.NotFoundException): self._get_events(ctx, cluster.id, step_id) def test_job_main_name(self): ctx = context.ctx() job_binary = self.api.job_binary_create(ctx, SAMPLE_JOB_BINARY) job_binary_id = job_binary["id"] job_values = copy.copy(SAMPLE_JOB) job_values["mains"] = [job_binary_id] job = self.api.job_create(ctx, job_values) name = self.api.job_main_name(ctx, job) self.assertEqual(SAMPLE_JOB_BINARY["name"], name) def test_job_no_main_name(self): ctx = context.ctx() job = self.api.job_create(ctx, SAMPLE_JOB) name = self.api.job_main_name(ctx, job) self.assertIsNone(name) def test_job_libs_names(self): ctx = context.ctx() job_binary = self.api.job_binary_create(ctx, SAMPLE_JOB_BINARY) job_binary_id_0 = job_binary["id"] jb_1_values = copy.copy(SAMPLE_JOB_BINARY) jb_1_values["name"] = "some_other_name" job_binary = self.api.job_binary_create(ctx, jb_1_values) job_binary_id_1 = job_binary["id"] job_values = copy.copy(SAMPLE_JOB) job_values["libs"] = [job_binary_id_0, job_binary_id_1] job = self.api.job_create(ctx, job_values) names = self.api.job_lib_names(ctx, job) self.assertEqual(["job_binary_test", "some_other_name"], names) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/conductor/test_resource.py0000664000175000017500000002006000000000000023676 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
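# An illustrative sketch of the Resource wrapper behaviour exercised by the
# tests below; it reuses only names already present in this module
# (r.Resource, ex.FrozenClassError) and is not itself a test:
#
#     res = r.Resource({'first': [1, 2], 'second': {'a': 1}})
#     res.first          # -> [1, 2]
#     res.second.a       # -> 1
#     res.first = 3      # raises ex.FrozenClassError: resources are immutable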
import copy

import testtools

from sahara.conductor import resource as r
from sahara import exceptions as ex
from sahara.swift import swift_helper
from sahara.utils import edp


SAMPLE_DICT = {
    'first': [1, 2],
    'second': {'a': 1, 'b': 2}
}

SAMPLE_NESTED_LISTS_DICT = {
    'a': [[{'b': 123}]]
}

SAMPLE_CLUSTER_DICT = {
    'name': 'test-cluster',
    'cluster_configs': {
        'general': {
            'some_overridden_config': 'somevalue'
        }
    },
    'node_groups': [
        {
            'name': 'master',
            'id': 'some_id'
        },
        {
            'id': 'some_id',
            'name': 'worker',
            'node_processes': ['tasktracker', 'datanode'],
            'node_configs': {},
            'instances': [
                {
                    'name': 'test-cluster-001',
                    'ip': '1.1.1.1'
                }
            ]
        }
    ]
}

SAMPLE_JOB_BINARY_DICT = {
    "created_at": "2014-02-14 16:26:08.895897",
    "description": "a job",
    "extra": {
        "password": "password",
        "user": "user"
    },
    "id": "c0caf119-f380-4fab-a46e-0f28ebd23b5c",
    "name": "bob",
    "tenant_id": "6b859fb8d1f44e8eafdfb91f21309b5f",
    "updated_at": "null",
    "url": "swift://bob.sahara/job"
}

SAMPLE_JOB_BINARY_DICT2 = copy.copy(SAMPLE_JOB_BINARY_DICT)
SAMPLE_JOB_BINARY_DICT2["name"] = "bill"
SAMPLE_JOB_BINARY_DICT2["id"] = "c0caf119-1111-2222-a46e-0f28ebd23b5c"
SAMPLE_JOB_BINARY_DICT2["url"] = "swift://bill.sahara/job"

SAMPLE_JOB_DICT = {
    "tenant_id": "test_tenant",
    "name": "job_test",
    "description": "test_desc",
    "type": "Pig",
    "mains": [SAMPLE_JOB_BINARY_DICT],
    "libs": [SAMPLE_JOB_BINARY_DICT2]
}

SAMPLE_DATA_SOURCE = {
    'name': 'input',
    'description': 'some input',
    'type': 'swift',
    'url': 'swift://tmckay.sahara',
    'credentials': {
        'username': 'me',
        'password': 'password'
    }
}

SAMPLE_JOB_EXECUTION = {
    "cluster_id": "7ed1c016-a8a3-4209-9931-6e80f58eea80",
    "created_at": "2014-02-14 17:46:56.631209",
    "extra": {},
    "id": "1b0b1874-a261-4d1f-971a-a2cebadeba6c",
    "info": {
        "actions": [{"conf": "some stuff"}, {"conf": "more stuff"}],
        "status": edp.JOB_STATUS_PENDING
    },
    "input_id": "b5ddde55-594e-428f-9040-028be81eb3c2",
    "job_configs": {
        "args": [
            "bob",
            "bill"
        ],
        "configs": {
            swift_helper.HADOOP_SWIFT_PASSWORD: "openstack",
            swift_helper.HADOOP_SWIFT_USERNAME: "admin",
            "myfavoriteconfig": 1
        },
        "proxy_configs": {
            "proxy_username": "admin",
            "proxy_password": "openstack"
        },
        "trusts": {
            "input_id": "9c528755099149b8b7166f3d0fa3bf10",
            "output_id": "3f2bde9d43ec440381dc9f736481e2b0"
        }
    },
    "job_id": "d0f3e397-7bef-42f9-a4db-e5a96059246e",
    "output_id": "f4993830-aa97-4b0b-914a-ab6430f742b6",
    "tenant_id": "6b859fb8d1f44e8eafdfb91f21309b5f"
}


class TestResource(testtools.TestCase):
    def test_resource_creation(self):
        res = r.Resource(SAMPLE_DICT)
        self.assertIsInstance(res.first, list)
        self.assertEqual([1, 2], res.first)
        self.assertIsInstance(res.second, r.Resource)
        self.assertEqual(1, res.second.a)
        self.assertEqual(2, res.second.b)

    def test_resource_immutability(self):
        res = r.Resource(SAMPLE_DICT)
        with testtools.ExpectedException(ex.FrozenClassError):
            res.first.append(123)
        with testtools.ExpectedException(ex.FrozenClassError):
            res.first = 123
        with testtools.ExpectedException(ex.FrozenClassError):
            res.second.a = 123

    def test_nested_lists(self):
        res = r.Resource(SAMPLE_NESTED_LISTS_DICT)
        self.assertEqual(123, res.a[0][0].b)

    def test_cluster_resource(self):
        cluster = r.ClusterResource(SAMPLE_CLUSTER_DICT)

        self.assertEqual('test-cluster', cluster.name)
        self.assertEqual('master', cluster.node_groups[0].name)
        self.assertIsInstance(cluster.node_groups[0], r.NodeGroupResource)
        self.assertEqual('test-cluster', cluster.node_groups[0].cluster.name)
        self.assertEqual('test-cluster-001',
                         cluster.node_groups[1].instances[0].name)
self.assertIsInstance(cluster.node_groups[1].instances[0], r.InstanceResource) self.assertEqual('worker', cluster.node_groups[1].instances[0].node_group.name) def test_to_dict(self): cluster = r.ClusterResource(SAMPLE_CLUSTER_DICT) self.assertEqual(SAMPLE_CLUSTER_DICT, cluster.to_dict()) def test_to_dict_filtering(self): cluster_dict = copy.deepcopy(SAMPLE_CLUSTER_DICT) cluster_dict['management_private_key'] = 'abacaba' cluster_dict['node_groups'][0]['id'] = 'some_id' cluster = r.ClusterResource(cluster_dict) self.assertEqual(SAMPLE_CLUSTER_DICT, cluster.to_dict()) def test_to_wrapped_dict(self): cluster = r.ClusterResource(SAMPLE_CLUSTER_DICT) wrapped_dict = cluster.to_wrapped_dict() self.assertEqual(1, len(wrapped_dict)) self.assertEqual(SAMPLE_CLUSTER_DICT, wrapped_dict['cluster']) def test_job_binary_filter_extra(self): job_binary = r.JobBinary(SAMPLE_JOB_BINARY_DICT) wrapped_dict = job_binary.to_wrapped_dict() self.assertNotIn('extra', wrapped_dict) def test_data_source_filter_credentials(self): data_source = r.DataSource(SAMPLE_DATA_SOURCE) wrapped_dict = data_source.to_wrapped_dict() self.assertNotIn('credentials', wrapped_dict) def test_job_filter_job_binary(self): job = r.Job(SAMPLE_JOB_DICT) wrapped_dict = job.to_wrapped_dict() self.assertIn('mains', wrapped_dict["job"]) self.assertIn('libs', wrapped_dict["job"]) self.assertNotIn('extra', wrapped_dict["job"]['mains']) self.assertNotIn('extra', wrapped_dict["job"]['libs']) def test_job_execution_filter_credentials(self): job_exec = r.JobExecution(SAMPLE_JOB_EXECUTION) self.assertIn('extra', job_exec) self.assertIn(swift_helper.HADOOP_SWIFT_PASSWORD, job_exec['job_configs']['configs']) self.assertIn(swift_helper.HADOOP_SWIFT_USERNAME, job_exec['job_configs']['configs']) for a in job_exec['info']['actions']: self.assertIn('conf', a) self.assertIn('trusts', job_exec['job_configs']) self.assertIn('input_id', job_exec['job_configs']['trusts']) self.assertIn('output_id', job_exec['job_configs']['trusts']) self.assertIn('proxy_configs', job_exec['job_configs']) self.assertIn('proxy_username', job_exec['job_configs']['proxy_configs']) self.assertIn('proxy_password', job_exec['job_configs']['proxy_configs']) wrapped_dict = job_exec.to_wrapped_dict()['job_execution'] self.assertNotIn('extra', wrapped_dict) configs = wrapped_dict['job_configs']['configs'] self.assertEqual("", configs[swift_helper.HADOOP_SWIFT_PASSWORD]) self.assertEqual("", configs[swift_helper.HADOOP_SWIFT_USERNAME]) for a in wrapped_dict['info']['actions']: self.assertNotIn('conf', a) self.assertNotIn('trusts', wrapped_dict['job_configs']) self.assertNotIn('proxy_configs', wrapped_dict['job_configs']) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.753891 sahara-16.0.0/sahara/tests/unit/db/0000775000175000017500000000000000000000000017025 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/db/__init__.py0000664000175000017500000000000000000000000021124 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.753891 sahara-16.0.0/sahara/tests/unit/db/migration/0000775000175000017500000000000000000000000021016 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 
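# A minimal commented sketch of the Resource wrapper behaviour verified by
# TestResource above; it assumes only the names already imported in that module
# (sahara.conductor.resource as r, sahara.exceptions as ex):
#
#   res = r.Resource({'first': [1, 2], 'second': {'a': 1}})
#   res.first          # -> [1, 2], lists are kept as lists
#   res.second.a       # -> 1, nested dicts become nested Resources
#   try:
#       res.second.a = 42          # any mutation attempt...
#   except ex.FrozenClassError:    # ...raises FrozenClassError
#       pass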
sahara-16.0.0/sahara/tests/unit/db/migration/__init__.py0000664000175000017500000000000000000000000023115 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/db/migration/test_db_manage_cli.py0000664000175000017500000000616400000000000025162 0ustar00zuulzuul00000000000000# Copyright 2012 New Dream Network, LLC (DreamHost) # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from unittest import mock import testscenarios import testtools from sahara.db.migration import cli class TestCli(testtools.TestCase): func_name = '' exp_args = () exp_kwargs = {} scenarios = [ ('stamp', dict(argv=['prog', 'stamp', 'foo'], func_name='stamp', exp_args=('foo',), exp_kwargs={'sql': False})), ('stamp-sql', dict(argv=['prog', 'stamp', 'foo', '--sql'], func_name='stamp', exp_args=('foo',), exp_kwargs={'sql': True})), ('current', dict(argv=['prog', 'current'], func_name='current', exp_args=[], exp_kwargs=dict())), ('history', dict(argv=['prog', 'history'], func_name='history', exp_args=[], exp_kwargs=dict())), ('check_migration', dict(argv=['prog', 'check_migration'], func_name='branches', exp_args=[], exp_kwargs=dict())), ('sync_revision_autogenerate', dict(argv=['prog', 'revision', '--autogenerate', '-m', 'message'], func_name='revision', exp_args=(), exp_kwargs={ 'message': 'message', 'sql': False, 'autogenerate': True})), ('sync_revision_sql', dict(argv=['prog', 'revision', '--sql', '-m', 'message'], func_name='revision', exp_args=(), exp_kwargs={ 'message': 'message', 'sql': True, 'autogenerate': False})), ('upgrade-sql', dict(argv=['prog', 'upgrade', '--sql', 'head'], func_name='upgrade', exp_args=('head',), exp_kwargs={'sql': True})), ('upgrade-delta', dict(argv=['prog', 'upgrade', '--delta', '3'], func_name='upgrade', exp_args=('+3',), exp_kwargs={'sql': False})) ] def setUp(self): super(TestCli, self).setUp() do_alembic_cmd_p = mock.patch.object(cli, 'do_alembic_command') self.addCleanup(do_alembic_cmd_p.stop) self.do_alembic_cmd = do_alembic_cmd_p.start() self.addCleanup(cli.CONF.reset) def test_cli(self): with mock.patch.object(sys, 'argv', self.argv): cli.main() self.do_alembic_cmd.assert_has_calls( [mock.call( mock.ANY, self.func_name, *self.exp_args, **self.exp_kwargs)] ) def load_tests(loader, in_tests, pattern): return testscenarios.load_tests_apply_scenarios(loader, in_tests, pattern) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/db/migration/test_migrations.py0000664000175000017500000005474100000000000024616 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # Copyright 2014 Mirantis Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for database migrations. For the opportunistic testing you need to set up a db named 'openstack_citest' with user 'openstack_citest' and password 'openstack_citest' on localhost. The test will then use that db and u/p combo to run the tests. For postgres on Ubuntu this can be done with the following commands: sudo -u postgres psql postgres=# create user openstack_citest with createdb login password 'openstack_citest'; postgres=# create database openstack_citest with owner openstack_citest; """ import os from oslo_db.sqlalchemy import test_base from oslo_db.sqlalchemy import utils as db_utils from sahara.tests.unit.db.migration import test_migrations_base as base class SaharaMigrationsCheckers(object): def assertColumnExists(self, engine, table, column): t = db_utils.get_table(engine, table) self.assertIn(column, t.c) def assertColumnsExist(self, engine, table, columns): for column in columns: self.assertColumnExists(engine, table, column) def assertColumnType(self, engine, table, column, column_type): t = db_utils.get_table(engine, table) column_ref_type = str(t.c[column].type) self.assertEqual(column_ref_type, column_type) def assertColumnCount(self, engine, table, columns): t = db_utils.get_table(engine, table) self.assertEqual(len(columns), len(t.columns)) def assertColumnNotExists(self, engine, table, column): t = db_utils.get_table(engine, table) self.assertNotIn(column, t.c) def assertIndexExists(self, engine, table, index): t = db_utils.get_table(engine, table) index_names = [idx.name for idx in t.indexes] self.assertIn(index, index_names) def assertIndexMembers(self, engine, table, index, members): self.assertIndexExists(engine, table, index) t = db_utils.get_table(engine, table) index_columns = None for idx in t.indexes: if idx.name == index: index_columns = idx.columns.keys() break self.assertEqual(sorted(members), sorted(index_columns)) def test_walk_versions(self): self.walk_versions(self.engine) def _pre_upgrade_001(self, engine): # Anything returned from this method will be # passed to corresponding _check_xxx method as 'data'. 
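# For example, a hypothetical pre-upgrade/check pair (the revision number XXX is
# a placeholder; real pairs such as _pre_upgrade_007/_check_007 appear further
# down) would look like:
#
#   def _pre_upgrade_XXX(self, engine):
#       # executed against the schema *before* migrating to revision XXX
#       return {'marker': 'inserted before upgrade'}
#
#   def _check_XXX(self, engine, data):
#       # 'data' is exactly what _pre_upgrade_XXX returned
#       self.assertEqual('inserted before upgrade', data['marker'])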
pass def _check_001(self, engine, data): job_binary_internal_columns = [ 'created_at', 'updated_at', 'id', 'tenant_id', 'name', 'data', 'datasize' ] self.assertColumnsExist( engine, 'job_binary_internal', job_binary_internal_columns) self.assertColumnCount( engine, 'job_binary_internal', job_binary_internal_columns) node_group_templates_columns = [ 'created_at', 'updated_at', 'id', 'name', 'description', 'tenant_id', 'flavor_id', 'image_id', 'plugin_name', 'hadoop_version', 'node_processes', 'node_configs', 'volumes_per_node', 'volumes_size', 'volume_mount_prefix', 'floating_ip_pool' ] self.assertColumnsExist( engine, 'node_group_templates', node_group_templates_columns) self.assertColumnCount( engine, 'node_group_templates', node_group_templates_columns) data_sources_columns = [ 'created_at', 'updated_at', 'id', 'tenant_id', 'name', 'description', 'type', 'url', 'credentials' ] self.assertColumnsExist( engine, 'data_sources', data_sources_columns) self.assertColumnCount( engine, 'data_sources', data_sources_columns) cluster_templates_columns = [ 'created_at', 'updated_at', 'id', 'name', 'description', 'cluster_configs', 'default_image_id', 'anti_affinity', 'tenant_id', 'neutron_management_network', 'plugin_name', 'hadoop_version' ] self.assertColumnsExist( engine, 'cluster_templates', cluster_templates_columns) self.assertColumnCount( engine, 'cluster_templates', cluster_templates_columns) job_binaries_columns = [ 'created_at', 'updated_at', 'id', 'tenant_id', 'name', 'description', 'url', 'extra' ] self.assertColumnsExist( engine, 'job_binaries', job_binaries_columns) self.assertColumnCount( engine, 'job_binaries', job_binaries_columns) jobs_columns = [ 'created_at', 'updated_at', 'id', 'tenant_id', 'name', 'description', 'type' ] self.assertColumnsExist(engine, 'jobs', jobs_columns) self.assertColumnCount(engine, 'jobs', jobs_columns) templates_relations_columns = [ 'created_at', 'updated_at', 'id', 'tenant_id', 'name', 'flavor_id', 'image_id', 'node_processes', 'node_configs', 'volumes_per_node', 'volumes_size', 'volume_mount_prefix', 'count', 'cluster_template_id', 'node_group_template_id', 'floating_ip_pool' ] self.assertColumnsExist( engine, 'templates_relations', templates_relations_columns) self.assertColumnCount( engine, 'templates_relations', templates_relations_columns) mains_association_columns = [ 'Job_id', 'JobBinary_id' ] self.assertColumnsExist( engine, 'mains_association', mains_association_columns) self.assertColumnCount( engine, 'mains_association', mains_association_columns) libs_association_columns = [ 'Job_id', 'JobBinary_id' ] self.assertColumnsExist( engine, 'libs_association', libs_association_columns) self.assertColumnCount( engine, 'libs_association', libs_association_columns) clusters_columns = [ 'created_at', 'updated_at', 'id', 'name', 'description', 'tenant_id', 'trust_id', 'is_transient', 'plugin_name', 'hadoop_version', 'cluster_configs', 'default_image_id', 'neutron_management_network', 'anti_affinity', 'management_private_key', 'management_public_key', 'user_keypair_id', 'status', 'status_description', 'info', 'extra', 'cluster_template_id' ] self.assertColumnsExist(engine, 'clusters', clusters_columns) self.assertColumnCount(engine, 'clusters', clusters_columns) node_groups_columns = [ 'created_at', 'updated_at', 'id', 'name', 'tenant_id', 'flavor_id', 'image_id', 'image_username', 'node_processes', 'node_configs', 'volumes_per_node', 'volumes_size', 'volume_mount_prefix', 'count', 'cluster_id', 'node_group_template_id', 'floating_ip_pool' ] 
self.assertColumnsExist(engine, 'node_groups', node_groups_columns) self.assertColumnCount(engine, 'node_groups', node_groups_columns) job_executions_columns = [ 'created_at', 'updated_at', 'id', 'tenant_id', 'job_id', 'input_id', 'output_id', 'start_time', 'end_time', 'cluster_id', 'info', 'progress', 'oozie_job_id', 'return_code', 'job_configs', 'extra' ] self.assertColumnsExist( engine, 'job_executions', job_executions_columns) self.assertColumnCount( engine, 'job_executions', job_executions_columns) instances_columns = [ 'created_at', 'updated_at', 'id', 'tenant_id', 'node_group_id', 'instance_id', 'instance_name', 'internal_ip', 'management_ip', 'volumes' ] self.assertColumnsExist(engine, 'instances', instances_columns) self.assertColumnCount(engine, 'instances', instances_columns) self._data_001(engine, data) def _data_001(self, engine, data): datasize = 512 * 1024 # 512kB data = os.urandom(datasize) t = db_utils.get_table(engine, 'job_binary_internal') engine.execute(t.insert(), data=data, id='123', name='name') new_data = engine.execute(t.select()).fetchone().data self.assertEqual(data, new_data) engine.execute(t.delete()) def _check_002(self, engine, data): # currently, 002 is just a placeholder pass def _check_003(self, engine, data): # currently, 003 is just a placeholder pass def _check_004(self, engine, data): # currently, 004 is just a placeholder pass def _check_005(self, engine, data): # currently, 005 is just a placeholder pass def _check_006(self, engine, data): # currently, 006 is just a placeholder pass def _pre_upgrade_007(self, engine): desc = 'magic' t = db_utils.get_table(engine, 'clusters') engine.execute(t.insert(), id='123', name='name', plugin_name='pname', hadoop_version='1', management_private_key='2', management_public_key='3', status_description=desc) def _check_007(self, engine, data): t = db_utils.get_table(engine, 'clusters') res = engine.execute(t.select().where(t.c.id == '123')).first() self.assertEqual('magic', res['status_description']) engine.execute(t.delete()) # check that status_description can keep 128kb. 
# MySQL varchar can not keep more then 64kb desc = 'a' * 128 * 1024 # 128kb t = db_utils.get_table(engine, 'clusters') engine.execute(t.insert(), id='123', name='name', plugin_name='plname', hadoop_version='hversion', management_private_key='1', management_public_key='2', status_description=desc) new_desc = engine.execute(t.select()).fetchone().status_description self.assertEqual(desc, new_desc) engine.execute(t.delete()) def _check_008(self, engine, data): self.assertColumnExists(engine, 'node_group_templates', 'security_groups') self.assertColumnExists(engine, 'node_groups', 'security_groups') self.assertColumnExists(engine, 'templates_relations', 'security_groups') def _check_009(self, engine, data): self.assertColumnExists(engine, 'clusters', 'rollback_info') def _check_010(self, engine, data): self.assertColumnExists(engine, 'node_group_templates', 'auto_security_group') self.assertColumnExists(engine, 'node_groups', 'auto_security_group') self.assertColumnExists(engine, 'templates_relations', 'auto_security_group') self.assertColumnExists(engine, 'node_groups', 'open_ports') def _check_011(self, engine, data): self.assertColumnExists(engine, 'clusters', 'sahara_info') def _check_012(self, engine, data): self.assertColumnExists(engine, 'node_group_templates', 'availability_zone') self.assertColumnExists(engine, 'node_groups', 'availability_zone') self.assertColumnExists(engine, 'templates_relations', 'availability_zone') def _check_014(self, engine, data): self.assertColumnExists(engine, 'node_group_templates', 'volume_type') self.assertColumnExists(engine, 'node_groups', 'volume_type') self.assertColumnExists(engine, 'templates_relations', 'volume_type') def _check_015(self, engine, data): provision_steps_columns = [ 'created_at', 'updated_at', 'id', 'cluster_id', 'tenant_id', 'step_name', 'step_type', 'completed', 'total', 'successful', 'started_at', 'completed_at', ] events_columns = [ 'created_at', 'updated_at', 'id', 'node_group_id', 'instance_id', 'instance_name', 'event_info', 'successful', 'step_id', ] self.assertColumnCount(engine, 'cluster_provision_steps', provision_steps_columns) self.assertColumnsExist(engine, 'cluster_provision_steps', provision_steps_columns) self.assertColumnCount(engine, 'cluster_events', events_columns) self.assertColumnsExist(engine, 'cluster_events', events_columns) def _check_016(self, engine, data): self.assertColumnExists(engine, 'node_group_templates', 'is_proxy_gateway') self.assertColumnExists(engine, 'node_groups', 'is_proxy_gateway') self.assertColumnExists(engine, 'templates_relations', 'is_proxy_gateway') def _check_017(self, engine, data): self.assertColumnNotExists(engine, 'job_executions', 'progress') def _check_018(self, engine, data): self.assertColumnExists(engine, 'node_group_templates', 'volume_local_to_instance') self.assertColumnExists(engine, 'node_groups', 'volume_local_to_instance') self.assertColumnExists(engine, 'templates_relations', 'volume_local_to_instance') def _check_019(self, engine, data): self.assertColumnExists(engine, 'node_group_templates', 'is_default') self.assertColumnExists(engine, 'cluster_templates', 'is_default') def _check_020(self, engine, data): self.assertColumnNotExists(engine, 'cluster_provision_steps', 'completed') self.assertColumnNotExists(engine, 'cluster_provision_steps', 'completed_at') self.assertColumnNotExists(engine, 'cluster_provision_steps', 'started_at') def _check_021(self, engine, data): self.assertColumnExists(engine, 'job_executions', 'data_source_urls') def _check_022(self, engine, 
data): columns = [ 'created_at', 'updated_at', 'id', 'job_id', 'tenant_id', 'name', 'description', 'mapping_type', 'location', 'value_type', 'required', 'order', 'default' ] self.assertColumnCount(engine, 'job_interface_arguments', columns) self.assertColumnsExist(engine, 'job_interface_arguments', columns) def _check_023(self, engine, data): self.assertColumnExists(engine, 'clusters', 'use_autoconfig') self.assertColumnExists(engine, 'cluster_templates', 'use_autoconfig') self.assertColumnExists(engine, 'node_group_templates', 'use_autoconfig') self.assertColumnExists(engine, 'node_groups', 'use_autoconfig') self.assertColumnExists(engine, 'templates_relations', 'use_autoconfig') def _check_024(self, engine, data): tables = [ 'node_group_templates', 'node_groups', 'templates_relations', 'clusters', 'cluster_templates' ] for table in tables: self.assertColumnExists(engine, table, 'shares') def _check_025(self, engine, data): self.assertColumnType(engine, 'instances', 'internal_ip', 'VARCHAR(45)') self.assertColumnType(engine, 'instances', 'management_ip', 'VARCHAR(45)') def _check_026(self, engine, data): tables = [ 'clusters', 'cluster_templates', 'node_group_templates', 'data_sources', 'job_executions', 'jobs', 'job_binary_internal', 'job_binaries', ] for table in tables: self.assertColumnExists(engine, table, 'is_public') self.assertColumnExists(engine, table, 'is_protected') def _check_027(self, engine, data): self.assertColumnNotExists(engine, 'job_executions', 'oozie_job_id') self.assertColumnExists(engine, 'job_executions', 'engine_job_id') def _check_028(self, engine, data): self.assertColumnExists(engine, 'instances', 'storage_devices_number') def _pre_upgrade_029(self, engine): t = db_utils.get_table(engine, 'node_group_templates') engine.execute(t.insert(), id='123', name='first', plugin_name='plg', hadoop_version='1', flavor_id='1', volumes_per_node=0, is_default=True, is_protected=False) engine.execute(t.insert(), id='124', name='second', plugin_name='plg', hadoop_version='1', flavor_id='1', volumes_per_node=0, is_default=False, is_protected=False) t = db_utils.get_table(engine, 'cluster_templates') engine.execute(t.insert(), id='123', name='name', plugin_name='plg', hadoop_version='1', is_default=True, is_protected=False) engine.execute(t.insert(), id='124', name='name', plugin_name='plg', hadoop_version='1', is_default=False, is_protected=False) def _check_029(self, engine, data): t = db_utils.get_table(engine, 'node_group_templates') res = engine.execute(t.select().where(t.c.id == '123')).first() self.assertTrue(res['is_protected']) res = engine.execute(t.select().where(t.c.id == '124')).first() self.assertFalse(res['is_protected']) engine.execute(t.delete()) t = db_utils.get_table(engine, 'cluster_templates') res = engine.execute(t.select().where(t.c.id == '123')).first() self.assertTrue(res['is_protected']) res = engine.execute(t.select().where(t.c.id == '124')).first() self.assertFalse(res['is_protected']) engine.execute(t.delete()) def _check_030(self, engine, data): health_check_columns = [ 'status', 'name', 'description', 'id', 'verification_id', 'created_at', 'updated_at' ] verification_columns = [ 'status', 'id', 'cluster_id', 'created_at', 'updated_at' ] self.assertColumnCount(engine, 'cluster_verifications', verification_columns) self.assertColumnsExist(engine, 'cluster_verifications', verification_columns) self.assertColumnCount(engine, 'cluster_health_checks', health_check_columns) self.assertColumnsExist(engine, 'cluster_health_checks', health_check_columns) 
def _check_031(self, engine, data): plugins_data_columns = [ 'name', 'id', 'tenant_id', 'version_labels', 'plugin_labels', 'updated_at', 'created_at' ] self.assertColumnCount(engine, 'plugin_data', plugins_data_columns) self.assertColumnsExist(engine, 'plugin_data', plugins_data_columns) def _check_033(self, engine, data): self.assertColumnExists(engine, 'clusters', 'anti_affinity_ratio') def _check_034(self, engine, data): self.assertColumnExists(engine, 'node_groups', 'boot_from_volume') self.assertColumnExists(engine, 'node_group_templates', 'boot_from_volume') self.assertColumnExists(engine, 'templates_relations', 'boot_from_volume') def _check_035(self, engine, data): for col in ['boot_volume_type', 'boot_volume_availability_zone', 'boot_volume_local_to_instance']: self.assertColumnExists(engine, 'node_groups', col) self.assertColumnExists(engine, 'node_group_templates', col) self.assertColumnExists(engine, 'templates_relations', col) class TestMigrationsMySQL(SaharaMigrationsCheckers, base.BaseWalkMigrationTestCase, base.TestModelsMigrationsSync, test_base.MySQLOpportunisticTestCase): pass class TestMigrationsPostgresql(SaharaMigrationsCheckers, base.BaseWalkMigrationTestCase, base.TestModelsMigrationsSync, test_base.PostgreSQLOpportunisticTestCase): pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/db/migration/test_migrations_base.py0000664000175000017500000001432200000000000025577 0ustar00zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright 2012-2013 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # # Ripped off from Nova's test_migrations.py # The only difference between Nova and this code is usage of alembic instead # of sqlalchemy migrations. # # There is an ongoing work to extact similar code to oslo incubator. Once it is # extracted we'll be able to remove this file and use oslo. import io import os import alembic from alembic import command from alembic import config as alembic_config from alembic import migration from alembic import script as alembic_script from oslo_config import cfg from oslo_db.sqlalchemy import test_migrations as t_m from oslo_log import log as logging import sahara.db.migration from sahara.db.sqlalchemy import api as sa from sahara.db.sqlalchemy import model_base LOG = logging.getLogger(__name__) CONF = cfg.CONF class BaseWalkMigrationTestCase(object): ALEMBIC_CONFIG = alembic_config.Config( os.path.join(os.path.dirname(sahara.db.migration.__file__), 'alembic.ini') ) ALEMBIC_CONFIG.sahara_config = CONF def _configure(self, engine): """For each type of repository we should do some of configure steps. For migrate_repo we should set under version control our database. For alembic we should configure database settings. For this goal we should use oslo_config and openstack.commom.db.sqlalchemy.session with database functionality (reset default settings and session cleanup). 
""" CONF.set_override('connection', str(engine.url), group='database') sa.cleanup() def _alembic_command(self, alembic_command, engine, *args, **kwargs): """Most of alembic command return data into output. We should redefine this setting for getting info. """ self.ALEMBIC_CONFIG.stdout = buf = io.StringIO() CONF.set_override('connection', str(engine.url), group='database') sa.cleanup() getattr(command, alembic_command)(*args, **kwargs) res = buf.getvalue().strip() LOG.debug('Alembic command {command} returns: {result}'.format( command=alembic_command, result=res)) sa.cleanup() return res def _get_versions(self): """Stores a list of versions. Since alembic version has a random algorithm of generation (SA-migrate has an ordered autoincrement naming) we should store a list of versions (version for upgrade) for successful testing of migrations in up mode. """ env = alembic_script.ScriptDirectory.from_config(self.ALEMBIC_CONFIG) versions = [] for rev in env.walk_revisions(): versions.append(rev.revision) versions.reverse() return versions def walk_versions(self, engine=None): # Determine latest version script from the repo, then # upgrade from 1 through to the latest, with no data # in the databases. This just checks that the schema itself # upgrades successfully. self._configure(engine) versions = self._get_versions() for ver in versions: self._migrate_up(engine, ver, with_data=True) def _get_version_from_db(self, engine): """Returns latest version from db for each type of migrate repo.""" conn = engine.connect() try: context = migration.MigrationContext.configure(conn) version = context.get_current_revision() or '-1' finally: conn.close() return version def _migrate(self, engine, version, cmd): """Base method for manipulation with migrate repo. It will upgrade or downgrade the actual database. """ self._alembic_command(cmd, engine, self.ALEMBIC_CONFIG, version) def _migrate_up(self, engine, version, with_data=False): """migrate up to a new version of the db. We allow for data insertion and post checks at every migration version with special _pre_upgrade_### and _check_### functions in the main test. """ # NOTE(sdague): try block is here because it's impossible to debug # where a failed data migration happens otherwise check_version = version try: if with_data: data = None pre_upgrade = getattr( self, "_pre_upgrade_%s" % check_version, None) if pre_upgrade: data = pre_upgrade(engine) self._migrate(engine, version, 'upgrade') self.assertEqual(version, self._get_version_from_db(engine)) if with_data: check = getattr(self, "_check_%s" % check_version, None) if check: check(engine, data) except Exception: LOG.error("Failed to migrate to version {version} on engine " "{engine}".format(version=version, engine=engine)) raise class TestModelsMigrationsSync(t_m.ModelsMigrationsSync): """Class for comparison of DB migration scripts and models. Allows to check if the DB schema obtained by applying of migration scripts is equal to the one produced from models definitions. 
""" ALEMBIC_CONFIG = alembic_config.Config( os.path.join(os.path.dirname(sahara.db.migration.__file__), 'alembic.ini') ) ALEMBIC_CONFIG.sahara_config = CONF def get_engine(self): return self.engine def db_sync(self, engine): CONF.set_override('connection', str(engine.url), group='database') alembic.command.upgrade(self.ALEMBIC_CONFIG, 'head') def get_metadata(self): return model_base.SaharaBase.metadata ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.753891 sahara-16.0.0/sahara/tests/unit/db/sqlalchemy/0000775000175000017500000000000000000000000021167 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/db/sqlalchemy/__init__.py0000664000175000017500000000000000000000000023266 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/db/sqlalchemy/test_types.py0000664000175000017500000001076600000000000023756 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import sqlalchemy as sa import testtools from unittest import mock from sahara.db.sqlalchemy import types class JsonEncodedTest(testtools.TestCase): def test_impl(self): impl = types.JsonEncoded.impl self.assertEqual(sa.Text, impl) def test_process_bind_param(self): t = types.JsonEncoded() self.assertEqual('{"a": 1}', t.process_bind_param({"a": 1}, None)) def test_process_bind_param_none(self): t = types.JsonEncoded() self.assertIsNone(t.process_bind_param(None, None)) def test_process_result_value(self): t = types.JsonEncoded() self.assertEqual({"a": 1}, t.process_result_value('{"a": 1}', None)) def test_process_result_value_none(self): t = types.JsonEncoded() self.assertIsNone(t.process_result_value(None, None)) class MutableDictTest(testtools.TestCase): def test_creation(self): sample = {"a": 1, "b": 2} d = types.MutableDict(sample) self.assertEqual(sample, d) def test_coerce_dict(self): sample = {"a": 1, "b": 2} md = types.MutableDict.coerce("test", sample) self.assertEqual(sample, md) self.assertIsInstance(md, types.MutableDict) def test_coerce_mutable_dict(self): sample = {"a": 1, "b": 2} sample_md = types.MutableDict(sample) md = types.MutableDict.coerce("test", sample_md) self.assertEqual(sample, md) self.assertIs(sample_md, md) def test_coerce_unsupported(self): with testtools.ExpectedException(ValueError): types.MutableDict.coerce("test", list()) @mock.patch.object(types.MutableDict, 'changed') def test_changed_on_update(self, m): sample = {"a": 1, "b": 2} d = types.MutableDict(sample) d.update({"b": 3}) self.assertEqual({"a": 1, "b": 3}, d) self.assertEqual(1, m.call_count) @mock.patch.object(types.MutableDict, 'changed') def test_changed_on_setitem(self, m): sample = {"a": 1, "b": 2} d = types.MutableDict(sample) d["b"] = 3 self.assertEqual({"a": 1, "b": 3}, d) self.assertEqual(1, m.call_count) 
@mock.patch.object(types.MutableDict, 'changed') def test_changed_on_delitem(self, m): sample = {"a": 1, "b": 2} d = types.MutableDict(sample) del d["b"] self.assertEqual({"a": 1}, d) self.assertEqual(1, m.call_count) class MutableListTest(testtools.TestCase): def test_creation(self): sample = [1, 2, 3] d = types.MutableList(sample) self.assertEqual(sample, d) def test_coerce_list(self): sample = [1, 2, 3] md = types.MutableList.coerce("test", sample) self.assertEqual(sample, md) self.assertIsInstance(md, types.MutableList) def test_coerce_mutable_list(self): sample = [1, 2, 3] sample_md = types.MutableList(sample) md = types.MutableList.coerce("test", sample_md) self.assertEqual(sample, md) self.assertIs(sample_md, md) def test_coerce_unsupported(self): with testtools.ExpectedException(ValueError): types.MutableList.coerce("test", dict()) @mock.patch.object(types.MutableList, 'changed') def test_changed_on_append(self, m): sample = [1, 2, 3] lst = types.MutableList(sample) lst.append(4) self.assertEqual([1, 2, 3, 4], lst) self.assertEqual(1, m.call_count) @mock.patch.object(types.MutableList, 'changed') def test_changed_on_setitem(self, m): sample = [1, 2, 3] lst = types.MutableList(sample) lst[2] = 4 self.assertEqual([1, 2, 4], lst) self.assertEqual(1, m.call_count) @mock.patch.object(types.MutableList, 'changed') def test_changed_on_delitem(self, m): sample = [1, 2, 3] lst = types.MutableList(sample) del lst[2] self.assertEqual([1, 2], lst) self.assertEqual(1, m.call_count) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.757891 sahara-16.0.0/sahara/tests/unit/db/templates/0000775000175000017500000000000000000000000021023 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/db/templates/__init__.py0000664000175000017500000000000000000000000023122 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/db/templates/common.py0000664000175000017500000000477500000000000022702 0ustar00zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import six class Command(object): def __init__(self, option_values): for k, v in six.iteritems(option_values): setattr(self, k, v) class Config(object): def __init__(self, option_values=None): self.command = Command(option_values or {}) class Logger(object): def __init__(self): self.clear_log() def warning(self, message): self.warnings.append(message) def info(self, message): self.infos.append(message) def debug(self, message): pass def clear_log(self): self.warnings = [] self.infos = [] SAMPLE_NGT = { "plugin_name": "test_plugin", "flavor_id": "42", "tenant_id": "tenant_1", "hadoop_version": "test_version", "name": "ngt_test", "node_processes": ["p1", "p2"], "floating_ip_pool": None, "availability_zone": None, "node_configs": { "service_1": { "config_1": "value_1" }, "service_2": { "config_1": "value_1" }, }, "is_default": True } SAMPLE_CLT = { "plugin_name": "test_plugin", "tenant_id": "tenant_1", "hadoop_version": "test_version", "name": "clt-test", "cluster_configs": { "service_1": { "config_1": "value_1" }, "service_2": { "config_1": "value_1" } }, "node_groups": [ { "name": "ng_1", "flavor_id": "42", "node_processes": ["p1", "p2"], "count": 1, "floating_ip_pool": None, "security_groups": None, "availability_zone": None, } ], "is_default": True } SAMPLE_CLUSTER = { "name": "test_cluster", "plugin_name": "test_plugin", "hadoop_version": "test_version", "tenant_id": "tenant_1", "node_groups": [ { "name": "ng_1", "node_group_template_id": "ng_1_id", "count": 1, } ] } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/db/templates/test_delete.py0000664000175000017500000003544500000000000023711 0ustar00zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
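# The Config and Logger helpers defined in common.py above are the only
# collaborators these template CLI tests need: Config turns a plain dict of
# option values into attribute access on conf.command, and Logger records
# warnings/infos for later assertions.  A commented sketch (values illustrative,
# c being the common module imported below):
#
#   conf = c.Config({'tenant_id': 'tenant_1', 'id': 'some-id'})
#   conf.command.tenant_id    # -> 'tenant_1'
#   conf.command.id           # -> 'some-id'
#
#   logger = c.Logger()
#   logger.warning('something went wrong')
#   assert 'something went wrong' in logger.warnings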
import copy from sahara import context from sahara.db.templates import api as template_api from sahara.db.templates import utils as u from sahara.tests.unit.conductor import base from sahara.tests.unit.db.templates import common as c class Config(c.Config): def __init__(self, option_values=None): option_values = option_values or {} if "name" not in option_values: option_values["name"] = "delete" super(Config, self).__init__(option_values) class TemplateDeleteTestCase(base.ConductorManagerTestCase): def setUp(self): super(TemplateDeleteTestCase, self).setUp() self.logger = c.Logger() template_api.set_logger(self.logger) def test_node_group_template_delete_by_id(self): self.logger.clear_log() self.setup_context(tenant_id=None) ctx = context.ctx() t = self.api.node_group_template_create(ctx, c.SAMPLE_NGT) option_values = {"tenant_id": t["tenant_id"], "id": t["id"]} template_api.set_conf(Config(option_values)) template_api.do_node_group_template_delete_by_id() msg = 'Deleted node group template {info}'.format( info=u.name_and_id(t)) self.assertIn(msg, self.logger.infos) t = self.api.node_group_template_get(ctx, t["id"]) self.assertIsNone(t) def test_node_group_template_delete_by_id_skipped(self): self.logger.clear_log() self.setup_context(tenant_id=None) ctx = context.ctx() template_values = copy.copy(c.SAMPLE_NGT) template_values["is_default"] = False t = self.api.node_group_template_create(ctx, template_values) option_values = {"tenant_id": t["tenant_id"], "id": t["id"]} template_api.set_conf(Config(option_values)) template_api.do_node_group_template_delete_by_id() msg = ("Deletion of node group template {info} skipped, " "not a default template".format(info=u.name_and_id(t))) self.assertIn(msg, self.logger.warnings) t = self.api.node_group_template_get(ctx, t["id"]) self.assertIsNotNone(t) def test_node_group_template_delete_bad_id(self): self.logger.clear_log() option_values = {"tenant_id": 1, "id": "badid"} template_api.set_conf(Config(option_values)) template_api.do_node_group_template_delete_by_id() msg = ("Deletion of node group template {id} failed, " "no such template".format(id=option_values["id"])) self.assertIn(msg, self.logger.warnings) def test_node_group_template_delete_by_name(self): self.logger.clear_log() ctx = context.ctx() t = self.api.node_group_template_create(ctx, c.SAMPLE_NGT) option_values = {"tenant_id": t["tenant_id"], "template_name": t["name"]} template_api.set_conf(Config(option_values)) template_api.do_node_group_template_delete() msg = 'Deleted node group template {info}'.format( info=u.name_and_id(t)) self.assertIn(msg, self.logger.infos) t = self.api.node_group_template_get(ctx, t["id"]) self.assertIsNone(t) def test_node_group_template_delete_by_name_skipped(self): self.logger.clear_log() ctx = context.ctx() template_values = copy.copy(c.SAMPLE_NGT) template_values["is_default"] = False t = self.api.node_group_template_create(ctx, template_values) option_values = {"tenant_id": t["tenant_id"], "template_name": t["name"]} template_api.set_conf(Config(option_values)) template_api.do_node_group_template_delete() msg = ("Deletion of node group template {name} failed, " "no such template".format(name=t["name"])) self.assertIn(msg, self.logger.warnings) t = self.api.node_group_template_get(ctx, t["id"]) self.assertIsNotNone(t) def test_node_group_template_delete_in_use(self): self.logger.clear_log() ctx = context.ctx() t = self.api.node_group_template_create(ctx, c.SAMPLE_NGT) # Make a cluster that references the node group template cluster_values = 
copy.deepcopy(c.SAMPLE_CLUSTER) cluster_values["node_groups"][0]["node_group_template_id"] = t["id"] cl = self.api.cluster_create(ctx, cluster_values) # Make a cluster template that references the node group template cluster_temp_values = copy.deepcopy(c.SAMPLE_CLT) cluster_temp_values["node_groups"] = cluster_values["node_groups"] clt = self.api.cluster_template_create(ctx, cluster_temp_values) # Set up the expected messages msgs = ["Node group template {info} in use " "by clusters {clusters}".format( info=u.name_and_id(t), clusters=[cl["name"]])] msgs += ["Node group template {info} in use " "by cluster templates {cluster_temps}".format( info=u.name_and_id(t), cluster_temps=[clt["name"]])] msgs += ["Deletion of node group template {info} failed".format( info=u.name_and_id(t))] # Check delete by name option_values = {"tenant_id": t["tenant_id"], "template_name": t["name"]} template_api.set_conf(Config(option_values)) template_api.do_node_group_template_delete() for msg in msgs: self.assertIn(msg, self.logger.warnings) self.logger.clear_log() # Check again with delete by id option_values = {"tenant_id": t["tenant_id"], "id": t["id"]} template_api.set_conf(Config(option_values)) template_api.do_node_group_template_delete_by_id() for msg in msgs: self.assertIn(msg, self.logger.warnings) self.logger.clear_log() def test_cluster_template_delete_by_id(self): self.logger.clear_log() self.setup_context(tenant_id=None) ctx = context.ctx() t = self.api.cluster_template_create(ctx, c.SAMPLE_CLT) option_values = {"tenant_id": t["tenant_id"], "id": t["id"]} template_api.set_conf(Config(option_values)) template_api.do_cluster_template_delete_by_id() msg = 'Deleted cluster template {info}'.format( info=u.name_and_id(t)) self.assertIn(msg, self.logger.infos) t = self.api.cluster_template_get(ctx, t["id"]) self.assertIsNone(t) def test_cluster_template_delete_by_id_skipped(self): self.logger.clear_log() ctx = context.ctx() template_values = copy.copy(c.SAMPLE_CLT) template_values["is_default"] = False t = self.api.cluster_template_create(ctx, template_values) option_values = {"tenant_id": t["tenant_id"], "id": t["id"]} template_api.set_conf(Config(option_values)) template_api.do_cluster_template_delete_by_id() msg = ("Deletion of cluster template {info} skipped, " "not a default template".format(info=u.name_and_id(t))) self.assertIn(msg, self.logger.warnings) t = self.api.cluster_template_get(ctx, t["id"]) self.assertIsNotNone(t) def test_cluster_template_delete_bad_id(self): self.logger.clear_log() option_values = {"tenant_id": 1, "id": "badid"} template_api.set_conf(Config(option_values)) template_api.do_cluster_template_delete_by_id() msg = ("Deletion of cluster template {id} failed, " "no such template".format(id=option_values["id"])) self.assertIn(msg, self.logger.warnings) def test_cluster_template_delete_by_name(self): self.logger.clear_log() ctx = context.ctx() t = self.api.cluster_template_create(ctx, c.SAMPLE_NGT) option_values = {"tenant_id": t["tenant_id"], "template_name": t["name"]} template_api.set_conf(Config(option_values)) template_api.do_cluster_template_delete() msg = 'Deleted cluster template {info}'.format( info=u.name_and_id(t)) self.assertIn(msg, self.logger.infos) t = self.api.cluster_template_get(ctx, t["id"]) self.assertIsNone(t) def test_cluster_template_delete_by_name_skipped(self): self.logger.clear_log() ctx = context.ctx() template_values = copy.copy(c.SAMPLE_NGT) template_values["is_default"] = False t = self.api.cluster_template_create(ctx, template_values) option_values 
= {"tenant_id": t["tenant_id"], "template_name": t["name"]} template_api.set_conf(Config(option_values)) template_api.do_cluster_template_delete() msg = ("Deletion of cluster template {name} failed, " "no such template".format(name=t["name"])) self.assertIn(msg, self.logger.warnings) t = self.api.cluster_template_get(ctx, t["id"]) self.assertIsNotNone(t) def test_cluster_template_delete_in_use(self): self.logger.clear_log() ctx = context.ctx() t = self.api.cluster_template_create(ctx, c.SAMPLE_CLT) # Make a cluster that references the cluster template cluster_values = copy.deepcopy(c.SAMPLE_CLUSTER) cluster_values["cluster_template_id"] = t["id"] del cluster_values["node_groups"] cl = self.api.cluster_create(ctx, cluster_values) # Set up the expected messages msgs = ["Cluster template {info} in use " "by clusters {clusters}".format( info=u.name_and_id(t), clusters=[cl["name"]])] msgs += ["Deletion of cluster template {info} failed".format( info=u.name_and_id(t))] # Check delete by name option_values = {"tenant_id": t["tenant_id"], "template_name": t["name"]} template_api.set_conf(Config(option_values)) template_api.do_cluster_template_delete() for msg in msgs: self.assertIn(msg, self.logger.warnings) self.logger.clear_log() # Check again with delete by id option_values = {"tenant_id": t["tenant_id"], "id": t["id"]} template_api.set_conf(Config(option_values)) template_api.do_cluster_template_delete_by_id() for msg in msgs: self.assertIn(msg, self.logger.warnings) self.logger.clear_log() def _make_templates(self, ctx, name, plugin_name, plugin_version): # Make a node group template values = copy.copy(c.SAMPLE_NGT) values["name"] = "ngt_" + name values["plugin_name"] = plugin_name values["hadoop_version"] = plugin_version ngt = self.api.node_group_template_create(ctx, values) # Make a cluster template that references the node group template values = copy.deepcopy(c.SAMPLE_CLT) values["name"] = "clt_" + name values["plugin_name"] = plugin_name values["hadoop_version"] = plugin_version values["node_groups"][0]["node_group_template_id"] = ngt["id"] clt = self.api.cluster_template_create(ctx, values) return ngt, clt def test_do_delete(self): self.logger.clear_log() ctx = context.ctx() # Make some plugins to delete ngt, clt = self._make_templates(ctx, "first", "plugin", "v1") # Make some more for the same plugin, different version ngt2, clt2 = self._make_templates(ctx, "second", "plugin", "v2") # Make another set for a different plugin, overlapping version safe_ngt, safe_clt = self._make_templates(ctx, "third", "plugin2", "v1") # Run a delete by plugin name/version for the first set option_values = {"tenant_id": ngt["tenant_id"], "plugin_name": [ngt["plugin_name"]], "plugin_version": [ngt["hadoop_version"]]} template_api.set_conf(Config(option_values)) # Should delete clt and then ngt, check for messages in order template_api.do_delete() msgs = ["Deleted cluster template {info}".format( info=u.name_and_id(clt))] msgs += ["Deleted node group template {info}".format( info=u.name_and_id(ngt))] self.assertEqual(msgs, self.logger.infos) self.assertIsNone(self.api.node_group_template_get(ctx, ngt["id"])) self.assertIsNone(self.api.cluster_template_get(ctx, clt["id"])) # Make sure the other templates are still there self.assertIsNotNone(self.api.node_group_template_get(ctx, ngt2["id"])) self.assertIsNotNone(self.api.cluster_template_get(ctx, clt2["id"])) self.assertIsNotNone(self.api.node_group_template_get(ctx, safe_ngt["id"])) self.assertIsNotNone(self.api.cluster_template_get(ctx, safe_clt["id"])) # 
Run delete again for the plugin but with no version specified self.logger.clear_log() option_values = {"tenant_id": ngt2["tenant_id"], "plugin_name": [ngt2["plugin_name"]], "plugin_version": None} template_api.set_conf(Config(option_values)) # Should delete clt2 and then ngt2, check for messages in order template_api.do_delete() msgs = ["Deleted cluster template {info}".format( info=u.name_and_id(clt2))] msgs += ["Deleted node group template {info}".format( info=u.name_and_id(ngt2))] self.assertEqual(msgs, self.logger.infos) self.assertIsNone(self.api.node_group_template_get(ctx, ngt2["id"])) self.assertIsNone(self.api.cluster_template_get(ctx, clt2["id"])) # Make sure the other templates are still there self.assertIsNotNone(self.api.node_group_template_get(ctx, safe_ngt["id"])) self.assertIsNotNone(self.api.cluster_template_get(ctx, safe_clt["id"])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/db/templates/test_update.py0000664000175000017500000007571400000000000023734 0ustar00zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import tempfile from unittest import mock import jsonschema from oslo_serialization import jsonutils as json from oslo_utils import uuidutils from sahara import context from sahara.db.templates import api as template_api from sahara.db.templates import utils as u from sahara.tests.unit.conductor import base from sahara.tests.unit.db.templates import common as c cluster_json = { "plugin_name": "vanilla", "hadoop_version": "2.7.1", "node_groups": [ { "name": "worker", "count": 3, "node_group_template_id": "{vanilla-260-default-worker}" }, { "name": "master", "count": 1, "node_group_template_id": "{vanilla-260-default-master}" } ], "name": "vanilla-260-default-cluster", "neutron_management_network": "{neutron_management_network}", "cluster_configs": {} } master_json = { "plugin_name": "vanilla", "hadoop_version": "2.7.1", "node_processes": [ "namenode", "resourcemanager", "hiveserver" ], "name": "vanilla-260-default-master", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", 'security_groups': "{security_groups}" } worker_json = { "plugin_name": "vanilla", "hadoop_version": "2.7.1", "node_processes": [ "nodemanager", "datanode" ], "name": "vanilla-260-default-worker", "floating_ip_pool": "{floating_ip_pool}", "flavor_id": "{flavor_id}", "auto_security_group": "{auto_security_group}", 'security_groups': "{security_groups}" } class Config(c.Config): def __init__(self, option_values=None): option_values = option_values or {} if "name" not in option_values: option_values["name"] = "update" super(Config, self).__init__(option_values) class TemplateUpdateTestCase(base.ConductorManagerTestCase): def setUp(self): super(TemplateUpdateTestCase, self).setUp() self.logger = c.Logger() template_api.set_logger(self.logger) 
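# The cluster_json/master_json/worker_json fixtures above use "{name}" style
# placeholders that the template API resolves from configuration before the
# templates reach the database.  A commented sketch of the idea, in the spirit of
# test_substitute_config_values_ngt below (values and path are illustrative):
#
#   ngt = {'flavor_id': '{flavor_id}', 'floating_ip_pool': '{floating_ip_pool}'}
#   configs = {'flavor_id': '2', 'floating_ip_pool': None}
#   template_api.substitute_config_values(configs, ngt, '/path/to/file.json')
#   # ngt is updated in place:
#   #   ngt['flavor_id']        -> '2'
#   #   ngt['floating_ip_pool'] -> None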
@mock.patch("sahara.utils.api_validator.ApiValidator.validate") def test_check_cluster_templates_valid(self, validate): self.logger.clear_log() ng_templates = [{"template": c.SAMPLE_NGT, "path": "/foo"}] # Reference the node group template by name clt = copy.copy(c.SAMPLE_CLT) clt["node_groups"] = [ {"name": "test", "count": 1, "node_group_template_id": "{%s}" % c.SAMPLE_NGT["name"]} ] cl_templates = [{"template": clt, "path": "/bar"}] # Test failed validation validate.side_effect = jsonschema.ValidationError("mistake") res = template_api.check_cluster_templates_valid(ng_templates, cl_templates) self.assertTrue(res) msg = "Validation for /bar failed, mistake" self.assertIn(msg, self.logger.warnings) # Validation passes, name replaced validate.side_effect = None self.logger.clear_log() res = template_api.check_cluster_templates_valid(ng_templates, cl_templates) self.assertFalse(res) node_groups = validate.call_args[0][0]["node_groups"] self.assertTrue(uuidutils.is_uuid_like( node_groups[0]["node_group_template_id"])) def test_add_config_section(self): # conf here can't be a mock.Mock() because hasattr will # return true conf = Config() conf.register_group = mock.Mock() conf.register_opts = mock.Mock() template_api.set_conf(conf) opts = ["option"] # Named config section template_api.add_config_section("section", opts) self.assertEqual(1, conf.register_group.call_count) config_group = conf.register_group.call_args[0][0] self.assertEqual("section", config_group.name) self.assertEqual([ mock.call(opts, config_group)], conf.register_opts.call_args_list) conf.register_group.reset_mock() conf.register_opts.reset_mock() # No config section, opts should be registered against # the default section template_api.add_config_section(None, opts) conf.register_group.assert_not_called() conf.register_opts.assert_called_with(opts) @mock.patch("sahara.db.templates.api.add_config_section") def test_add_config_section_for_template(self, add_config_section): conf = mock.Mock() conf.list_all_sections = mock.Mock() template_api.set_conf(conf) # No config sections conf.list_all_sections.return_value = [] ngt = c.SAMPLE_NGT template_api.add_config_section_for_template(ngt) add_config_section.assert_called_with(None, template_api.all_template_opts) add_config_section.reset_mock() # Add config section matching plugin conf.list_all_sections.return_value += [ngt["plugin_name"]] template_api.add_config_section_for_template(ngt) add_config_section.assert_called_with(ngt["plugin_name"], template_api.all_template_opts) add_config_section.reset_mock() # Add config section matching plugin and version section = "{plugin_name}_{hadoop_version}".format(**ngt) conf.list_all_sections.return_value += [section] template_api.add_config_section_for_template(ngt) add_config_section.assert_called_with(section, template_api.all_template_opts) add_config_section.reset_mock() # Add config section matching plugin, version and name section = "{plugin_name}_{hadoop_version}_{name}".format(**ngt) conf.list_all_sections.return_value += [section] template_api.add_config_section_for_template(ngt) add_config_section.assert_called_with( section, template_api.node_group_template_opts) add_config_section.reset_mock() # Add config section matching name section = "{name}".format(**ngt) conf.list_all_sections.return_value += [section] template_api.add_config_section_for_template(ngt) add_config_section.assert_called_with( section, template_api.node_group_template_opts) add_config_section.reset_mock() def test_substitute_config_values_ngt(self): ngt = 
copy.copy(c.SAMPLE_NGT) ngt["flavor_id"] = "{flavor_id}" ngt["floating_ip_pool"] = "{floating_ip_pool}" configs = {"flavor_id": "2", "floating_ip_pool": None} template_api.substitute_config_values(configs, ngt, "/path") self.assertEqual("2", ngt["flavor_id"]) self.assertIsNone(ngt["floating_ip_pool"]) def test_substitute_config_values_clt(self): clt = copy.copy(c.SAMPLE_CLT) clt["neutron_management_network"] = "{neutron_management_network}" clt["default_image_id"] = "{default_image_id}" netid = uuidutils.generate_uuid() configs = {"neutron_management_network": netid, "default_image_id": None} template_api.substitute_config_values(configs, clt, "/path") self.assertEqual(netid, clt["neutron_management_network"]) self.assertIsNone(clt["default_image_id"]) def _write_files(self, tempdir, templates): files = [] for template in templates: fp = tempfile.NamedTemporaryFile(suffix=".json", mode="w", dir=tempdir, delete=False) json.dump(template, fp) files.append(fp.name) fp.close() return files @mock.patch("sahara.db.templates.api.get_configs") @mock.patch("sahara.db.templates.api.add_config_section_for_template") def test_process_files(self, add_config_section, get_configs): self.logger.clear_log() tempdir = tempfile.mkdtemp() # This should be ignored by process files some_other_json = {"name": "fred", "description": "not a template"} files = self._write_files( tempdir, [cluster_json, master_json, worker_json, some_other_json]) get_configs.return_value = {"flavor_id": '2', 'security_groups': [], 'auto_security_group': False} option_values = {"plugin_name": None, "plugin_version": None} template_api.set_conf(Config(option_values)) # Check that cluster and ng templates are read and returned ng_templates, cl_templates = template_api.process_files(tempdir, files) cl_temp_names = [f["template"]["name"] for f in cl_templates] ng_temp_names = [f["template"]["name"] for f in ng_templates] self.assertEqual([cluster_json["name"]], cl_temp_names) self.assertEqual([master_json["name"], worker_json["name"]], ng_temp_names) # Plugin name/version filtering applied option_values = {"plugin_name": "vanilla", "plugin_version": "2.7.1"} template_api.set_conf(Config(option_values)) ng_templates, cl_templates = template_api.process_files(tempdir, files) self.assertEqual(1, len(cl_templates)) self.assertEqual(2, len(ng_templates)) option_values = {"plugin_name": "hdp", "plugin_version": "2.7.1"} template_api.set_conf(Config(option_values)) ng_templates, cl_templates = template_api.process_files(tempdir, files) self.assertEqual(0, len(cl_templates)) self.assertEqual(0, len(ng_templates)) @mock.patch("sahara.db.templates.api.get_configs") @mock.patch("sahara.db.templates.api.add_config_section_for_template") def test_process_files_validation_error(self, add_config_section, get_configs): self.logger.clear_log() tempdir = tempfile.mkdtemp() files = self._write_files( tempdir, [cluster_json, master_json, worker_json]) get_configs.return_value = { "flavor_id": '2', 'security_groups': [], 'auto_security_group': False } option_values = {"plugin_name": None, "plugin_version": None} template_api.set_conf(Config(option_values)) # Bad JSON validation for ng should cause all files to be skipped bad_worker = copy.copy(worker_json) bad_worker["my_dog"] = ["fido"] new_file = self._write_files(tempdir, [bad_worker])[0] ng_templates, cl_templates = template_api.process_files( tempdir, files + [new_file]) self.assertEqual(0, len(ng_templates)) self.assertEqual(0, len(cl_templates)) msg = ("Validation for {path} failed, " "Additional 
properties are not allowed".format(path=new_file)) self.assertTrue(self.logger.warnings[0].startswith(msg)) @mock.patch("sahara.db.templates.api.get_configs") @mock.patch("sahara.db.templates.api.add_config_section_for_template") def test_process_files_bad_json(self, add_config_section, get_configs): self.logger.clear_log() tempdir = tempfile.mkdtemp() files = self._write_files( tempdir, [cluster_json, master_json, worker_json]) get_configs.return_value = {"flavor_id": '2', 'security_groups': [], 'auto_security_group': False} option_values = {"plugin_name": None, "plugin_version": None} template_api.set_conf(Config(option_values)) # Invalid JSON should cause all files to be skipped fp = tempfile.NamedTemporaryFile(suffix=".json", dir=tempdir, delete=False) fp.write(b"not json") files += [fp.name] fp.close() ng_templates, cl_templates = template_api.process_files(tempdir, files) self.assertEqual(0, len(ng_templates)) self.assertEqual(0, len(cl_templates)) msg = ("Error processing {name}".format(name=files[-1])) self.assertTrue(self.logger.warnings[0].startswith(msg)) msg = ("Skipping processing for {dir}, " "error processing files".format(dir=tempdir)) self.assertEqual(msg, self.logger.warnings[1]) def test_add_node_group_templates(self): self.logger.clear_log() ctx = context.ctx() # Create a record that will be updated in the db existing = copy.copy(c.SAMPLE_NGT) existing = self.api.node_group_template_create(ctx, existing) # Create the update update = copy.copy(c.SAMPLE_NGT) update["flavor_id"] = "6" # Create a record that will be new in the db new = copy.copy(c.SAMPLE_NGT) new["name"] = "new_name" ngts = [{"template": update, "path": "foo"}, {"template": new, "path": "bar"}] ng_info, error = template_api.add_node_group_templates(ctx, ngts) self.assertFalse(error) new = self.api.node_group_template_get_all(ctx, name=new["name"])[0] self.assertIsNotNone(new) # ng_info["created"] is a list of templates that were created self.assertEqual(1, len(ng_info["created"])) self.assertEqual(new["id"], ng_info["created"][0]["id"]) # ng_info["updated"] is a list of tuples for templates that # were updated. First element in the tuple is the template, # second is a dictionary of fields that were updated. 
self.assertEqual(1, len(ng_info["updated"])) self.assertEqual(existing["id"], ng_info["updated"][0][0]["id"]) self.assertEqual({"flavor_id": "42"}, ng_info["updated"][0][1]) # ng_info["dict"] is a dictionary of name/id pairs self.assertEqual({new["name"]: new["id"], existing["name"]: existing["id"]}, ng_info["ids"]) msg = ("Created node group template {info} from bar".format( info=u.name_and_id(new))) self.assertIn(msg, self.logger.infos) msg = ("Updated node group template {info} from foo".format( info=u.name_and_id(existing))) self.assertIn(msg, self.logger.infos) self.api.node_group_template_destroy(ctx, new["id"], ignore_prot_on_def=True) self.api.node_group_template_destroy(ctx, existing["id"], ignore_prot_on_def=True) @mock.patch("sahara.conductor.API.node_group_template_update") @mock.patch("sahara.db.templates.api.reverse_node_group_template_creates") @mock.patch("sahara.db.templates.api.reverse_node_group_template_updates") def test_add_node_group_templates_update_failed(self, reverse_updates, reverse_creates, ng_update): self.logger.clear_log() ctx = context.ctx() ng_update.side_effect = Exception("mistake") # Create a record that will be updated in the db existing = copy.copy(c.SAMPLE_NGT) existing = self.api.node_group_template_create(ctx, existing) # Create the update update = copy.copy(c.SAMPLE_NGT) update["flavor_id"] = "6" # Create a record that will be new in the db new = copy.copy(c.SAMPLE_NGT) new["name"] = "new_name" ngts = [{"template": new, "path": "bar"}, {"template": update, "path": "foo"}] ng_info, error = template_api.add_node_group_templates(ctx, ngts) new = self.api.node_group_template_get_all(ctx, name=new["name"])[0] self.assertTrue(error) self.assertEqual(1, reverse_creates.call_count) # call should have been (ctx, [new]) self.assertEqual(new["id"], reverse_creates.call_args[0][1][0]["id"]) self.assertEqual(1, reverse_updates.call_count) msg = ("Update of node group template {info} failed, mistake".format( info=u.name_and_id(existing))) self.assertIn(msg, self.logger.warnings) self.api.node_group_template_destroy(ctx, new["id"], ignore_prot_on_def=True) self.api.node_group_template_destroy(ctx, existing["id"], ignore_prot_on_def=True) @mock.patch("sahara.conductor.API.node_group_template_create") @mock.patch("sahara.db.templates.api.reverse_node_group_template_creates") @mock.patch("sahara.db.templates.api.reverse_node_group_template_updates") def test_add_node_group_templates_create_failed(self, reverse_updates, reverse_creates, ng_create): self.logger.clear_log() ctx = context.ctx() ng_create.side_effect = Exception("mistake") # Create a record that will be updated in the db existing = copy.copy(c.SAMPLE_NGT) existing = self.api.node_group_template_create(ctx, existing) # Create the update update = copy.copy(c.SAMPLE_NGT) update["flavor_id"] = "6" # Create a record that will be new in the db new = copy.copy(c.SAMPLE_NGT) new["name"] = "new_name" ngts = [{"template": update, "path": "foo"}, {"template": new, "path": "bar"}] ng_info, error = template_api.add_node_group_templates(ctx, ngts) self.assertTrue(error) self.assertEqual(1, reverse_creates.call_count) self.assertEqual(1, reverse_updates.call_count) # call should have been (ctx, [(existing, updated_fields)]) self.assertEqual({"flavor_id": existing["flavor_id"]}, reverse_updates.call_args[0][1][0][1]) msg = "Creation of node group template from bar failed, mistake" self.assertIn(msg, self.logger.warnings) self.api.node_group_template_destroy(ctx, existing["id"], ignore_prot_on_def=True) def 
test_add_cluster_templates(self): self.logger.clear_log() ctx = context.ctx() # Create a record that will be updated in the db existing = copy.copy(c.SAMPLE_CLT) existing = self.api.cluster_template_create(ctx, existing) # Create the update update = copy.copy(c.SAMPLE_CLT) update["hadoop_version"] = "1" # Create a record that will be new in the db new = copy.copy(c.SAMPLE_CLT) new["name"] = "new_name" clts = [{"template": update, "path": "foo"}, {"template": new, "path": "bar"}] error = template_api.add_cluster_templates(ctx, clts, {}) self.assertFalse(error) new = self.api.cluster_template_get_all(ctx, name=new["name"])[0] self.assertIsNotNone(new) msg = ("Created cluster template {info} from bar".format( info=u.name_and_id(new))) self.assertIn(msg, self.logger.infos) msg = ("Updated cluster template {info} from foo".format( info=u.name_and_id(existing))) self.assertIn(msg, self.logger.infos) self.api.cluster_template_destroy(ctx, new["id"], ignore_prot_on_def=True) self.api.cluster_template_destroy(ctx, existing["id"], ignore_prot_on_def=True) @mock.patch("sahara.conductor.API.cluster_template_update") @mock.patch("sahara.db.templates.api.reverse_cluster_template_creates") @mock.patch("sahara.db.templates.api.reverse_cluster_template_updates") def test_add_cluster_templates_update_failed(self, reverse_updates, reverse_creates, cl_update): self.logger.clear_log() ctx = context.ctx() cl_update.side_effect = Exception("mistake") # Create a record that will be updated in the db existing = copy.copy(c.SAMPLE_CLT) existing = self.api.cluster_template_create(ctx, existing) # Create the update update = copy.copy(c.SAMPLE_CLT) update["hadoop_version"] = "1" # Create a record that will be new in the db new = copy.copy(c.SAMPLE_CLT) new["name"] = "new_name" clts = [{"template": new, "path": "bar"}, {"template": update, "path": "foo"}] error = template_api.add_cluster_templates(ctx, clts, {}) new = self.api.cluster_template_get_all(ctx, name=new["name"])[0] self.assertTrue(error) self.assertEqual(1, reverse_creates.call_count) # call should have been (ctx, [new]) self.assertEqual(new["id"], reverse_creates.call_args[0][1][0]["id"]) self.assertEqual(1, reverse_updates.call_count) msg = ("Update of cluster template {info} failed, mistake".format( info=u.name_and_id(existing))) self.assertIn(msg, self.logger.warnings) self.api.cluster_template_destroy(ctx, new["id"], ignore_prot_on_def=True) self.api.cluster_template_destroy(ctx, existing["id"], ignore_prot_on_def=True) @mock.patch("sahara.conductor.API.cluster_template_create") @mock.patch("sahara.db.templates.api.reverse_cluster_template_creates") @mock.patch("sahara.db.templates.api.reverse_cluster_template_updates") def test_add_cluster_templates_create_failed(self, reverse_updates, reverse_creates, cl_create): self.logger.clear_log() ctx = context.ctx() cl_create.side_effect = Exception("mistake") # Create a record that will be updated in the db existing = copy.copy(c.SAMPLE_CLT) existing = self.api.cluster_template_create(ctx, existing) # Create the update update = copy.copy(c.SAMPLE_CLT) update["hadoop_version"] = "1" # Create a record that will be new in the db new = copy.copy(c.SAMPLE_CLT) new["name"] = "new_name" clts = [{"template": update, "path": "foo"}, {"template": new, "path": "bar"}] error = template_api.add_cluster_templates(ctx, clts, {}) self.assertTrue(error) self.assertEqual(1, reverse_creates.call_count) self.assertEqual(1, reverse_updates.call_count) # call should have been (ctx, [(existing, updated_fields)]) # updated fields 
will contain hadoop_version and node_groups, # since node_groups is modified by the conductor updated_fields = reverse_updates.call_args[0][1][0][1] self.assertEqual(updated_fields["hadoop_version"], existing["hadoop_version"]) self.assertIn("node_groups", updated_fields) msg = "Creation of cluster template from bar failed, mistake" self.assertIn(msg, self.logger.warnings) self.api.cluster_template_destroy(ctx, existing["id"], ignore_prot_on_def=True) @mock.patch("sahara.db.templates.api.get_configs") @mock.patch("sahara.db.templates.api.add_config_section_for_template") def test_do_update_trash(self, add_config, get_configs): self.logger.clear_log() ctx = context.ctx() tempdir = tempfile.mkdtemp() self._write_files(tempdir, [cluster_json, master_json, worker_json]) get_configs.return_value = { "flavor_id": '2', "neutron_management_network": uuidutils.generate_uuid(), 'auto_security_group': True, 'security_groups': [], } option_values = {"tenant_id": ctx.tenant_id, "directory": tempdir, "norecurse": None, "plugin_name": None, "plugin_version": None} template_api.set_conf(Config(option_values)) template_api.do_update() ngs = self.api.node_group_template_get_all(ctx) ng_names = sorted([ng["name"] for ng in ngs]) self.assertEqual(sorted([master_json["name"], worker_json["name"]]), ng_names) clts = self.api.cluster_template_get_all(ctx) clt_names = sorted([clt["name"] for clt in clts]) clts = self.api.cluster_template_get_all(ctx) self.assertEqual([cluster_json["name"]], clt_names) @mock.patch("sahara.db.templates.api.check_cluster_templates_valid") @mock.patch("sahara.db.templates.api.get_configs") @mock.patch("sahara.db.templates.api.add_config_section_for_template") def test_do_update_cluster_invalid(self, add_config, get_configs, clt_valid): self.logger.clear_log() ctx = context.ctx() tempdir = tempfile.mkdtemp() self._write_files(tempdir, [cluster_json, master_json, worker_json]) get_configs.return_value = { "flavor_id": '2', "neutron_management_network": uuidutils.generate_uuid() } option_values = {"tenant_id": ctx.tenant_id, "directory": tempdir, "norecurse": None, "plugin_name": None, "plugin_version": None} template_api.set_conf(Config(option_values)) clt_valid.return_value = True template_api.do_update() ngs = self.api.node_group_template_get_all(ctx) self.assertEqual([], ngs) clts = self.api.cluster_template_get_all(ctx) self.assertEqual([], clts) msg = ("Skipping processing for {dir}, " "error processing cluster templates".format(dir=tempdir)) self.assertIn(msg, self.logger.warnings) @mock.patch("sahara.db.templates.api.check_usage_of_existing") @mock.patch("sahara.db.templates.api.get_configs") @mock.patch("sahara.db.templates.api.add_config_section_for_template") def test_do_update_existing_fails(self, add_config, get_configs, check_existing): self.logger.clear_log() ctx = context.ctx() tempdir = tempfile.mkdtemp() self._write_files(tempdir, [cluster_json, master_json, worker_json]) get_configs.return_value = { "flavor_id": '2', "neutron_management_network": uuidutils.generate_uuid() } option_values = {"tenant_id": ctx.tenant_id, "directory": tempdir, "norecurse": None, "plugin_name": None, "plugin_version": None} template_api.set_conf(Config(option_values)) check_existing.return_value = True template_api.do_update() ngs = self.api.node_group_template_get_all(ctx) self.assertEqual([], ngs) clts = self.api.cluster_template_get_all(ctx) self.assertEqual([], clts) msg = ("Skipping processing for {dir}, " "templates in use".format(dir=tempdir)) self.assertIn(msg, 
self.logger.warnings) @mock.patch("sahara.db.templates.api.add_node_group_templates") @mock.patch("sahara.db.templates.api.get_configs") @mock.patch("sahara.db.templates.api.add_config_section_for_template") def test_do_update_add_ngts_fails(self, add_config, get_configs, add_ngts): self.logger.clear_log() ctx = context.ctx() tempdir = tempfile.mkdtemp() self._write_files(tempdir, [cluster_json, master_json, worker_json]) get_configs.return_value = { "flavor_id": '2', "neutron_management_network": uuidutils.generate_uuid() } option_values = {"tenant_id": ctx.tenant_id, "directory": tempdir, "norecurse": None, "plugin_name": None, "plugin_version": None} template_api.set_conf(Config(option_values)) add_ngts.return_value = ({}, True) template_api.do_update() ngs = self.api.node_group_template_get_all(ctx) self.assertEqual([], ngs) clts = self.api.cluster_template_get_all(ctx) self.assertEqual([], clts) msg = ("Skipping processing for {dir}, " "error processing node group templates".format(dir=tempdir)) self.assertIn(msg, self.logger.warnings) @mock.patch("sahara.db.templates.api.reverse_node_group_template_creates") @mock.patch("sahara.db.templates.api.reverse_node_group_template_updates") @mock.patch("sahara.db.templates.api.add_cluster_templates") @mock.patch("sahara.db.templates.api.get_configs") @mock.patch("sahara.db.templates.api.add_config_section_for_template") def test_do_update_add_clts_fails(self, add_config, get_configs, add_clts, reverse_ng_updates, reverse_ng_creates): self.logger.clear_log() ctx = context.ctx() tempdir = tempfile.mkdtemp() self._write_files(tempdir, [cluster_json, master_json, worker_json]) get_configs.return_value = { "flavor_id": '2', "neutron_management_network": uuidutils.generate_uuid() } option_values = {"tenant_id": ctx.tenant_id, "directory": tempdir, "norecurse": None, "plugin_name": None, "plugin_version": None} template_api.set_conf(Config(option_values)) add_clts.return_value = True template_api.do_update() self.assertEqual(1, reverse_ng_creates.call_count) self.assertEqual(1, reverse_ng_updates.call_count) clts = self.api.cluster_template_get_all(ctx) self.assertEqual([], clts) msg = ("Skipping processing for {dir}, " "error processing cluster templates".format(dir=tempdir)) self.assertIn(msg, self.logger.warnings) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/db/templates/test_utils.py0000664000175000017500000001227700000000000023605 0ustar00zuulzuul00000000000000# Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
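# --- Editor's illustrative sketch (not part of the original fixtures) ---
# The do_update()/process_files() tests above load cluster and node group
# template JSON files from a directory, fill "{config}" placeholders from
# oslo.config values returned by get_configs(), and rewrite
# "{template-name}" references to the ids of the templates they create.
# A minimal, hypothetical pair of such files could look like the dicts
# below; the real fixtures (cluster_json, master_json, worker_json) are
# defined elsewhere in this test package.
EXAMPLE_WORKER_NGT_JSON = {
    "name": "example-worker",
    "plugin_name": "vanilla",
    "hadoop_version": "2.7.1",
    "node_processes": ["datanode", "nodemanager"],
    # substituted from the matching config section by get_configs()
    "flavor_id": "{flavor_id}",
}

EXAMPLE_CLUSTER_TEMPLATE_JSON = {
    "name": "example-cluster",
    "plugin_name": "vanilla",
    "hadoop_version": "2.7.1",
    "node_groups": [
        # "{example-worker}" is replaced with the created node group
        # template's uuid once that template exists in the db
        {"name": "worker", "count": 3,
         "node_group_template_id": "{example-worker}"},
    ],
}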
import copy from sahara import context from sahara.db.templates import utils from sahara.tests.unit.conductor import base from sahara.tests.unit.db.templates import common as c class FakeNGT(object): def __init__(self, id): self.node_group_template_id = id class FakeCluster(object): def __init__(self, name, node_groups=None, cluster_template_id=None): self.name = name self.node_groups = node_groups or [] self.cluster_template_id = cluster_template_id class TemplateUtilsTestCase(base.ConductorManagerTestCase): def test_substitute_ng_ids(self): cl = {"node_groups": [{"name": "worker", "node_group_template_id": "{vanilla-worker}", "count": 3}, {"name": "master", "node_group_template_id": "{vanilla-master}", "count": 1}, {"name": "secondary-name", "node_group_template_id": "some_id"}]} ng_dict = {"vanilla-worker": 1, "vanilla-master": 2} utils.substitute_ng_ids(cl, ng_dict) self.assertEqual("1", cl["node_groups"][0]["node_group_template_id"]) self.assertEqual("2", cl["node_groups"][1]["node_group_template_id"]) self.assertEqual("some_id", cl["node_groups"][2]["node_group_template_id"]) def test_check_plugin_version(self): template = {"plugin_name": "vanilla", "hadoop_version": "2.7.1"} self.assertTrue(utils.check_plugin_version(template, None)) self.assertTrue(utils.check_plugin_version(template, ["2.7.1"])) self.assertTrue(utils.check_plugin_version(template, ["vanilla.2.7.1"])) self.assertFalse(utils.check_plugin_version(template, ["1.2.1"])) def test_check_plugin_name_and_version(self): template = {"plugin_name": "vanilla", "hadoop_version": "2.7.1"} self.assertTrue(utils.check_plugin_name_and_version( template, None, ["2.7.1"])) self.assertTrue(utils.check_plugin_name_and_version( template, ["vanilla"], None)) self.assertTrue(utils.check_plugin_name_and_version( template, ["vanilla"], ["2.7.1"])) self.assertTrue(utils.check_plugin_name_and_version( template, ["vanilla"], ["vanilla.2.7.1"])) self.assertFalse(utils.check_plugin_name_and_version( template, ["hdp"], ["2.7.1"])) def test_check_node_group_template_usage(self): ng1 = FakeNGT(1) ng2 = FakeNGT(2) cluster = FakeCluster("cluster", [ng1]) template = FakeCluster("template", [ng2]) cluster_users, template_users = utils.check_node_group_template_usage( 1, [cluster], [template]) self.assertEqual([cluster.name], cluster_users) self.assertEqual([], template_users) cluster_users, template_users = utils.check_node_group_template_usage( 2, [cluster], [template]) self.assertEqual([], cluster_users) self.assertEqual([template.name], template_users) def test_check_cluster_template_usage(self): cluster = FakeCluster("cluster", cluster_template_id=1) cluster_users = utils.check_cluster_template_usage(1, [cluster]) self.assertEqual([cluster.name], cluster_users) def test_find_node_group_template_by_name(self): ctx = context.ctx() t = self.api.node_group_template_create(ctx, c.SAMPLE_NGT) found = utils.find_node_group_template_by_name(ctx, c.SAMPLE_NGT["name"]) self.assertEqual(t["id"], found["id"]) found = utils.find_node_group_template_by_name(ctx, "fred") self.assertIsNone(found) def test_find_cluster_template_by_name(self): ctx = context.ctx() t = self.api.cluster_template_create(ctx, c.SAMPLE_CLT) found = utils.find_cluster_template_by_name(ctx, c.SAMPLE_CLT["name"]) self.assertEqual(t["id"], found["id"]) found = utils.find_cluster_template_by_name(ctx, "fred") self.assertIsNone(found) def test_value_diff(self): current = {"cat": "meow", "dog": "woof", "horse": ["neigh", "whinny"]} new_values = {"dog": "bark", "horse": "snort"} original = 
copy.deepcopy(current) backup = utils.value_diff(current, new_values) self.assertEqual({"dog": "woof", "horse": ["neigh", "whinny"]}, backup) # current is unchanged self.assertEqual(original, current) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/db/test_utils.py0000664000175000017500000001050600000000000021600 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import testtools from unittest import mock from sahara import context from sahara.db.sqlalchemy import api from sahara.db.sqlalchemy import models as m import sahara.tests.unit.base as base class TestPaginationUtils(testtools.TestCase): def test_get_prev_and_next_objects(self): query = [mock.MagicMock(id=i) for i in range(100)] res = api._get_prev_and_next_objects(query, 5, None) self.assertEqual((None, 4), res) res = api._get_prev_and_next_objects(query, None, None) self.assertEqual((None, None), res) res = api._get_prev_and_next_objects(query, 5, mock.MagicMock(id=42)) self.assertEqual((37, 47), res) res = api._get_prev_and_next_objects(query, 5, mock.MagicMock(id=4)) self.assertEqual((None, 9), res) res = api._get_prev_and_next_objects(query, 5, mock.MagicMock(id=100)) self.assertEqual((None, None), res) def test_parse_sorting_args(self): self.assertEqual(("name", "desc"), api._parse_sorting_args("-name")) self.assertEqual(("name", "asc"), api._parse_sorting_args("name")) class TestRegex(testtools.TestCase): def test_get_regex_op(self): regex_op = api._get_regex_op("mysql://user:passw@localhost/sahara") self.assertEqual("REGEXP", regex_op) regex_op = api._get_regex_op("postgresql://localhost/sahara") self.assertEqual("~", regex_op) regex_op = api._get_regex_op("sqlite://user:passw@localhost/sahara") self.assertIsNone(regex_op) class TestRegexFilter(base.SaharaWithDbTestCase): @mock.patch("sahara.db.sqlalchemy.api._get_regex_op") def test_regex_filter(self, get_regex_op): query = api.model_query(m.ClusterTemplate, context.ctx()) regex_cols = ["name", "description", "plugin_name"] search_opts = {"name": "fred", "hadoop_version": "2", "bogus": "jack", "plugin_name": "vanilla"} # Since regex_op is None remaining_opts should be a copy of search_opts get_regex_op.return_value = None query, remaining_opts = api.regex_filter( query, m.ClusterTemplate, regex_cols, search_opts) self.assertEqual(search_opts, remaining_opts) self.assertIsNot(search_opts, remaining_opts) # Since regex_cols is [] remaining_opts should be a copy of search_opts get_regex_op.return_value = "REGEXP" query, remaining_opts = api.regex_filter( query, m.ClusterTemplate, [], search_opts) self.assertEqual(search_opts, remaining_opts) self.assertIsNot(search_opts, remaining_opts) # Remaining should be search_opts with name and plugin_name removed # These are the only fields that are in regex_cols and also in # the model. 
get_regex_op.return_value = "REGEXP" query, remaining_opts = api.regex_filter( query, m.ClusterTemplate, regex_cols, search_opts) self.assertEqual({"hadoop_version": "2", "bogus": "jack"}, remaining_opts) # bogus is not in the model so it should be left in remaining # even though regex_cols lists it regex_cols.append("bogus") query, remaining_opts = api.regex_filter( query, m.ClusterTemplate, regex_cols, search_opts) self.assertEqual({"hadoop_version": "2", "bogus": "jack"}, remaining_opts) # name will not be removed because the value is not a string search_opts["name"] = 5 query, remaining_opts = api.regex_filter( query, m.ClusterTemplate, regex_cols, search_opts) self.assertEqual({"hadoop_version": "2", "bogus": "jack", "name": 5}, remaining_opts) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.757891 sahara-16.0.0/sahara/tests/unit/plugins/0000775000175000017500000000000000000000000020121 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/plugins/__init__.py0000664000175000017500000000000000000000000022220 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/plugins/test_base_plugins_support.py0000664000175000017500000000177400000000000026012 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import testtools from sahara import main from sahara.plugins import base as pb class BasePluginsSupportTest(testtools.TestCase): def setUp(self): super(BasePluginsSupportTest, self).setUp() main.CONF.set_override('plugins', ['fake', 'cdh', 'spark']) pb.setup_plugins() def test_plugins_loaded(self): plugins = [p.name for p in pb.PLUGINS.get_plugins()] self.assertIn('fake', plugins) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/plugins/test_images.py0000664000175000017500000005014600000000000023005 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from unittest import mock from oslo_utils import uuidutils import yaml from sahara import exceptions as ex from sahara.plugins import exceptions as p_ex from sahara.plugins import images from sahara.tests.unit import base as b class TestImages(b.SaharaTestCase): def test_package_spec(self): cls = images.SaharaPackageValidator validator = cls.from_spec("java", {}, []) self.assertIsInstance(validator, cls) self.assertEqual(str(validator.packages[0]), "java") validator = cls.from_spec({"java": {"version": "8"}}, {}, []) self.assertIsInstance(validator, cls) self.assertEqual(str(validator.packages[0]), "java-8") validator = cls.from_spec( [{"java": {"version": "8"}}, "hadoop"], {}, []) self.assertIsInstance(validator, cls) self.assertEqual(str(validator.packages[0]), "java-8") self.assertEqual(str(validator.packages[1]), "hadoop") def test_script_spec(self): cls = images.SaharaScriptValidator resource_roots = ['tests/unit/plugins'] validator = cls.from_spec('test_images.py', {}, resource_roots) self.assertIsInstance(validator, cls) self.assertEqual(validator.env_vars, ['test_only', 'distro']) validator = cls.from_spec( {'test_images.py': {'env_vars': ['extra-file', 'user']}}, {}, resource_roots) self.assertIsInstance(validator, cls) self.assertEqual(validator.env_vars, ['test_only', 'distro', 'extra-file', 'user']) def test_all_spec(self): cls = images.SaharaAllValidator validator_map = images.SaharaImageValidatorBase.get_validator_map() validator = cls.from_spec( [{'package': {'java': {'version': '8'}}}, {'package': 'hadoop'}], validator_map, []) self.assertIsInstance(validator, cls) self.assertEqual(len(validator.validators), 2) self.assertEqual(validator.validators[0].packages[0].name, 'java') self.assertEqual(validator.validators[1].packages[0].name, 'hadoop') def test_any_spec(self): cls = images.SaharaAnyValidator validator_map = images.SaharaImageValidatorBase.get_validator_map() validator = cls.from_spec( [{'package': {'java': {'version': '8'}}}, {'package': 'hadoop'}], validator_map, []) self.assertIsInstance(validator, cls) self.assertEqual(len(validator.validators), 2) self.assertEqual(validator.validators[0].packages[0].name, 'java') self.assertEqual(validator.validators[1].packages[0].name, 'hadoop') def test_os_case_spec(self): cls = images.SaharaOSCaseValidator validator_map = images.SaharaImageValidatorBase.get_validator_map() spec = [ {'redhat': [{'package': 'nfs-utils'}]}, {'debian': [{'package': 'nfs-common'}]} ] validator = cls.from_spec(spec, validator_map, []) self.assertIsInstance(validator, cls) self.assertEqual(len(validator.distros), 2) self.assertEqual(validator.distros[0].distro, 'redhat') self.assertEqual(validator.distros[1].distro, 'debian') redhat, debian = ( validator.distros[os].validator.validators[0].packages[0].name for os in range(2)) self.assertEqual(redhat, 'nfs-utils') self.assertEqual(debian, 'nfs-common') def test_sahara_image_validator_spec(self): cls = images.SaharaImageValidator validator_map = images.SaharaImageValidatorBase.get_validator_map() resource_roots = ['tests/unit/plugins'] spec = """ arguments: java-version: description: The version of java. 
default: openjdk required: false choices: - openjdk - oracle-java validators: - os_case: - redhat: - package: nfs-utils - debian: - package: nfs-common - any: - all: - package: java-1.8.0-openjdk-devel - argument_set: argument_name: java-version value: 1.8.0 - all: - package: java-1.7.0-openjdk-devel - argument_set: argument_name: java-version value: 1.7.0 - script: test_images.py - package: - hadoop - hadoop-libhdfs - hadoop-native - hadoop-pipes - hadoop-sbin - hadoop-lzo - lzo - lzo-devel - hadoop-lzo-native - argument_case: argument_name: JAVA_VERSION cases: 1.7.0: - script: test_images.py 1.8.0: - script: test_images.py """ spec = yaml.safe_load(spec) validator = cls.from_spec(spec, validator_map, resource_roots) validators = validator.validators self.assertIsInstance(validator, cls) self.assertEqual(len(validators), 5) self.assertIsInstance(validators[0], images.SaharaOSCaseValidator) self.assertIsInstance(validators[1], images.SaharaAnyValidator) self.assertIsInstance(validators[2], images.SaharaScriptValidator) self.assertIsInstance(validators[3], images.SaharaPackageValidator) self.assertIsInstance( validators[4], images.SaharaArgumentCaseValidator) self.assertEqual(1, len(validator.arguments)) self.assertEqual(validator.arguments['java-version'].required, False) self.assertEqual(validator.arguments['java-version'].default, 'openjdk') self.assertEqual(validator.arguments['java-version'].description, 'The version of java.') self.assertEqual(validator.arguments['java-version'].choices, ['openjdk', 'oracle-java']) def test_package_validator_redhat(self): cls = images.SaharaPackageValidator image_arguments = {"distro": 'centos'} packages = [cls.Package("java", "8")] validator = images.SaharaPackageValidator(packages) remote = mock.Mock() validator.validate(remote, test_only=True, image_arguments=image_arguments) remote.execute_command.assert_called_with( "rpm -q java-8", run_as_root=True) image_arguments = {"distro": 'fedora'} packages = [cls.Package("java", "8"), cls.Package("hadoop")] validator = images.SaharaPackageValidator(packages) remote = mock.Mock() remote.execute_command.side_effect = ( ex.RemoteCommandException("So bad!")) try: validator.validate(remote, test_only=True, image_arguments=image_arguments) except p_ex.ImageValidationError as e: self.assertIn("So bad!", e.message) remote.execute_command.assert_called_with( "rpm -q java-8 hadoop", run_as_root=True) self.assertEqual(remote.execute_command.call_count, 1) image_arguments = {"distro": 'redhat'} packages = [cls.Package("java", "8"), cls.Package("hadoop")] validator = images.SaharaPackageValidator(packages) remote = mock.Mock() def side_effect(call, run_as_root=False): if "rpm" in call: raise ex.RemoteCommandException("So bad!") remote.execute_command.side_effect = side_effect try: validator.validate(remote, test_only=False, image_arguments=image_arguments) except p_ex.ImageValidationError as e: self.assertIn("So bad!", e.message) self.assertEqual(remote.execute_command.call_count, 3) calls = [mock.call("rpm -q java-8 hadoop", run_as_root=True), mock.call("yum install -y java-8 hadoop", run_as_root=True), mock.call("rpm -q java-8 hadoop", run_as_root=True)] remote.execute_command.assert_has_calls(calls) def test_package_validator_debian(self): cls = images.SaharaPackageValidator image_arguments = {"distro": 'ubuntu'} packages = [cls.Package("java", "8")] validator = images.SaharaPackageValidator(packages) remote = mock.Mock() validator.validate(remote, test_only=True, image_arguments=image_arguments) 
remote.execute_command.assert_called_with( "dpkg -s java-8", run_as_root=True) image_arguments = {"distro": 'ubuntu'} packages = [cls.Package("java", "8"), cls.Package("hadoop")] validator = images.SaharaPackageValidator(packages) remote = mock.Mock() remote.execute_command.side_effect = ( ex.RemoteCommandException("So bad!")) try: validator.validate(remote, test_only=True, image_arguments=image_arguments) except p_ex.ImageValidationError as e: self.assertIn("So bad!", e.message) remote.execute_command.assert_called_with( "dpkg -s java-8 hadoop", run_as_root=True) self.assertEqual(remote.execute_command.call_count, 1) image_arguments = {"distro": 'ubuntu'} packages = [cls.Package("java", "8"), cls.Package("hadoop")] validator = images.SaharaPackageValidator(packages) remote = mock.Mock() remote.execute_command.side_effect = ( ex.RemoteCommandException("So bad!")) try: validator.validate(remote, test_only=False, image_arguments=image_arguments) except p_ex.ImageValidationError as e: self.assertIn("So bad!", e.message) self.assertEqual(remote.execute_command.call_count, 2) calls = [mock.call("dpkg -s java-8 hadoop", run_as_root=True), mock.call("DEBIAN_FRONTEND=noninteractive " + "apt-get -y install java-8 hadoop", run_as_root=True)] remote.execute_command.assert_has_calls(calls) @mock.patch('oslo_utils.uuidutils.generate_uuid') def test_script_validator(self, uuid): hash_value = '00000000-0000-0000-0000-000000000000' uuidutils.generate_uuid.return_value = hash_value cls = images.SaharaScriptValidator image_arguments = {"distro": 'centos'} cmd = b"It's dangerous to go alone. Run this." validator = cls(cmd, env_vars=image_arguments.keys(), output_var="distro") remote = mock.Mock( execute_command=mock.Mock( return_value=(0, 'fedora'))) validator.validate(remote, test_only=False, image_arguments=image_arguments) call = [mock.call('chmod +x /tmp/%(hash_value)s.sh' % {'hash_value': hash_value}, run_as_root=True), mock.call('/tmp/%(hash_value)s.sh' % {'hash_value': hash_value}, run_as_root=True)] remote.execute_command.assert_has_calls(call) self.assertEqual(image_arguments['distro'], 'fedora') def test_any_validator(self): cls = images.SaharaAnyValidator class FakeValidator(images.SaharaImageValidatorBase): def __init__(self, mock_validate): self.mock_validate = mock_validate def validate(self, remote, test_only=False, **kwargs): self.mock_validate(remote, test_only=test_only, **kwargs) # One success short circuits validation always_tells_the_truth = FakeValidator(mock.Mock()) validator = cls([always_tells_the_truth, always_tells_the_truth]) validator.validate(None, test_only=False) self.assertEqual(always_tells_the_truth.mock_validate.call_count, 1) # All failures fails, and calls with test_only=True on all first always_lies = FakeValidator( mock.Mock(side_effect=p_ex.ImageValidationError("Oh no!"))) validator = cls([always_lies, always_lies]) try: validator.validate(None, test_only=False) except p_ex.ImageValidationError: pass self.assertEqual(always_lies.mock_validate.call_count, 4) # But it fails after a first pass if test_only=True. always_lies = FakeValidator( mock.Mock(side_effect=p_ex.ImageValidationError("Oh no!"))) validator = cls([always_lies, always_lies]) try: validator.validate(None, test_only=True) except p_ex.ImageValidationError: pass self.assertEqual(always_lies.mock_validate.call_count, 2) # One failure doesn't end iteration. 
always_tells_the_truth = FakeValidator(mock.Mock()) always_lies = FakeValidator( mock.Mock(side_effect=p_ex.ImageValidationError("Oh no!"))) validator = cls([always_lies, always_tells_the_truth]) validator.validate(None, test_only=False) self.assertEqual(always_lies.mock_validate.call_count, 1) self.assertEqual(always_tells_the_truth.mock_validate.call_count, 1) def test_all_validator(self): cls = images.SaharaAllValidator # All pass always_tells_the_truth = mock.Mock() validator = cls([always_tells_the_truth, always_tells_the_truth]) validator.validate(None, test_only=False) self.assertEqual(always_tells_the_truth.validate.call_count, 2) always_tells_the_truth.validate.assert_called_with( None, test_only=False, image_arguments=None) # Second fails always_tells_the_truth = mock.Mock() always_lies = mock.Mock(validate=mock.Mock( side_effect=p_ex.ImageValidationError("Boom!"))) validator = cls([always_tells_the_truth, always_lies]) try: validator.validate(None, test_only=True) except p_ex.ImageValidationError: pass self.assertEqual(always_tells_the_truth.validate.call_count, 1) self.assertEqual(always_lies.validate.call_count, 1) always_tells_the_truth.validate.assert_called_with( None, test_only=True, image_arguments=None) always_lies.validate.assert_called_with( None, test_only=True, image_arguments=None) # First fails always_tells_the_truth = mock.Mock() always_lies = mock.Mock(validate=mock.Mock( side_effect=p_ex.ImageValidationError("Boom!"))) validator = cls([always_lies, always_tells_the_truth]) try: validator.validate(None, test_only=True, image_arguments={}) except p_ex.ImageValidationError: pass self.assertEqual(always_lies.validate.call_count, 1) always_lies.validate.assert_called_with( None, test_only=True, image_arguments={}) self.assertEqual(always_tells_the_truth.validate.call_count, 0) def test_os_case_validator(self): cls = images.SaharaOSCaseValidator Distro = images.SaharaOSCaseValidator._distro_tuple # First match wins and short circuits iteration centos = Distro("centos", mock.Mock()) redhat = Distro("redhat", mock.Mock()) distros = [centos, redhat] image_arguments = {images.SaharaImageValidator.DISTRO_KEY: "centos"} validator = cls(distros) validator.validate(None, test_only=False, image_arguments=image_arguments) self.assertEqual(centos.validator.validate.call_count, 1) self.assertEqual(redhat.validator.validate.call_count, 0) centos.validator.validate.assert_called_with( None, test_only=False, image_arguments=image_arguments) # Families match centos = Distro("centos", mock.Mock()) redhat = Distro("redhat", mock.Mock()) distros = [centos, redhat] image_arguments = {images.SaharaImageValidator.DISTRO_KEY: "fedora"} validator = cls(distros) validator.validate(None, test_only=False, image_arguments=image_arguments) self.assertEqual(centos.validator.validate.call_count, 0) self.assertEqual(redhat.validator.validate.call_count, 1) redhat.validator.validate.assert_called_with( None, test_only=False, image_arguments=image_arguments) # Non-matches do nothing centos = Distro("centos", mock.Mock()) redhat = Distro("redhat", mock.Mock()) distros = [centos, redhat] image_arguments = {images.SaharaImageValidator.DISTRO_KEY: "ubuntu"} validator = cls(distros) validator.validate(None, test_only=False, image_arguments=image_arguments) self.assertEqual(centos.validator.validate.call_count, 0) self.assertEqual(redhat.validator.validate.call_count, 0) def test_sahara_argument_case_validator(self): cls = images.SaharaArgumentCaseValidator # Match gets called image_arguments = {"argument": 
"value"} match = mock.Mock() nomatch = mock.Mock() cases = {"value": match, "another_value": nomatch} validator = cls("argument", cases) validator.validate(None, test_only=False, image_arguments=image_arguments) self.assertEqual(match.validate.call_count, 1) self.assertEqual(nomatch.validate.call_count, 0) match.validate.assert_called_with( None, test_only=False, image_arguments=image_arguments) # Non-matches do nothing image_arguments = {"argument": "value"} nomatch = mock.Mock() cases = {"some_value": nomatch, "another_value": nomatch} validator = cls("argument", cases) validator.validate(None, test_only=False, image_arguments=image_arguments) self.assertEqual(nomatch.validate.call_count, 0) def test_sahara_argument_set_validator(self): cls = images.SaharaArgumentSetterValidator # Old variable is overwritten image_arguments = {"argument": "value"} validator = cls("argument", "new_value") validator.validate(None, test_only=False, image_arguments=image_arguments) self.assertEqual(image_arguments["argument"], "new_value") # New variable is set image_arguments = {"argument": "value"} validator = cls("another_argument", "value") validator.validate(None, test_only=False, image_arguments=image_arguments) self.assertEqual(image_arguments, {"argument": "value", "another_argument": "value"}) def test_sahara_image_validator(self): cls = images.SaharaImageValidator sub_validator = mock.Mock(validate=mock.Mock()) remote = mock.Mock(get_os_distrib=mock.Mock( return_value="centos")) validator = cls(sub_validator, {}) validator.validate(remote, test_only=False, image_arguments={}) expected_map = {images.SaharaImageValidatorBase.DISTRO_KEY: "centos"} sub_validator.validate.assert_called_with( remote, test_only=False, image_arguments=expected_map) expected_map = {images.SaharaImageValidatorBase.DISTRO_KEY: "centos"} validator.validate(remote, test_only=True, image_arguments={}) sub_validator.validate.assert_called_with( remote, test_only=True, image_arguments=expected_map) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/tests/unit/plugins/test_kerberos.py0000664000175000017500000001151700000000000023353 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from unittest import mock from sahara import context from sahara.plugins import kerberos as krb from sahara.tests.unit import base ADD_PRINCIPAL_SCRIPT = """#!/bin/bash mkdir -p /tmp/sahara-kerberos/ kadmin -p sahara/admin <
<test>
 <p>v</p>
</test>
""", doc.toprettyxml(indent=" ")) def test_get_if_not_exist_and_add_to_element(self): doc = self.create_default_doc() elem = x.get_and_create_if_not_exist(doc, 'test', 'tag_to_add') x.add_text_element_to_element(doc, elem, 'p', 'v') self.assertEqual("""
<?xml version="1.0" ?>
<test>
 <tag_to_add>
  <p>v</p>
 </tag_to_add>
</test>
""", doc.toprettyxml(indent=" ")) def test_add_tagged_list(self): doc = self.create_default_doc() x.add_tagged_list(doc, 'test', 'list_item', ['a', 'b']) self.assertEqual(""" a b """, doc.toprettyxml(indent=" ")) def test_add_equal_separated_dict(self): doc = self.create_default_doc() x.add_equal_separated_dict(doc, 'test', 'dict_item', {'': 'empty1', None: 'empty2'}) self.assertEqual(""" """, doc.toprettyxml(indent=" ")) x.add_equal_separated_dict(doc, 'test', 'dict_item', {'a': 'b', 'c': 'd'}) self.assertEqual(""" a=b c=d """, doc.toprettyxml(indent=" ")) def create_default_doc(self): doc = xml.Document() test = doc.createElement('test') doc.appendChild(test) return doc def _get_xml_text(self, strip): doc = x.load_xml_document("service/edp/resources/workflow.xml", strip) x.add_child(doc, 'action', 'java') x.add_text_element_to_tag(doc, 'java', 'sometag', 'somevalue') return doc.toprettyxml(indent=" ").split("\n") def test_load_xml_document_strip(self): # Get the lines from the xml docs stripped = set(self._get_xml_text(True)) unstripped = set(self._get_xml_text(False)) # Prove they're different diff = stripped.symmetric_difference(unstripped) self.assertGreater(len(diff), 0) # Prove the differences are only blank lines non_blank_diffs = [l for l in diff if not l.isspace()] self.assertEqual(0, len(non_blank_diffs)) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.781891 sahara-16.0.0/sahara/topology/0000775000175000017500000000000000000000000016173 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/topology/__init__.py0000664000175000017500000000000000000000000020272 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.781891 sahara-16.0.0/sahara/topology/resources/0000775000175000017500000000000000000000000020205 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/topology/resources/core-template.xml0000664000175000017500000000164400000000000023475 0ustar00zuulzuul00000000000000 net.topology.impl org.apache.hadoop.net.NetworkTopologyWithNodeGroup The implementation of NetworkTopology which is classic three layer one by default. net.topology.nodegroup.aware true By default, network topology is not aware of nodegroup layer. dfs.block.replicator.classname org.apache.hadoop.hdfs.server.namenode.BlockPlacementPolicyWithNodeGroup The default implementation of ReplicationTargetChooser. fs.swift.service.sahara.location-aware true ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/topology/resources/mapred-template.xml0000664000175000017500000000141100000000000024005 0ustar00zuulzuul00000000000000 mapred.jobtracker.nodegroup.aware true Identify if jobtracker is aware of nodegroup layer. mapred.task.cache.levels 3 This is the max level of the task cache. For example, if the level is 2, the tasks cached are at the host level and at the rack level. mapred.jobtracker.jobSchedulable org.apache.hadoop.mapred.JobSchedulableWithNodeGroup The class responsible for an entity in FairScheduler that can launch tasks. 
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/topology/topology_helper.py0000664000175000017500000001342600000000000021766 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import hashlib from oslo_config import cfg from oslo_log import log from sahara import context from sahara import exceptions as ex from sahara.i18n import _ from sahara.utils.openstack import base as b from sahara.utils.openstack import nova from sahara.utils import xmlutils as x TOPOLOGY_CONFIG = { "topology.node.switch.mapping.impl": "org.apache.hadoop.net.ScriptBasedMapping", "topology.script.file.name": "/etc/hadoop/topology.sh" } LOG = log.getLogger(__name__) opts = [ cfg.BoolOpt('enable_data_locality', default=False, help="Enables data locality for hadoop cluster. " "Also enables data locality for Swift used by hadoop. " "If enabled, 'compute_topology' and 'swift_topology' " "configuration parameters should point to OpenStack and " "Swift topology correspondingly."), cfg.BoolOpt('enable_hypervisor_awareness', default=True, help="Enables four-level topology for data locality. " "Works only if corresponding plugin supports such mode."), cfg.StrOpt('compute_topology_file', default='etc/sahara/compute.topology', help="File with nova compute topology. " "It should contain mapping between nova computes and " "racks."), cfg.StrOpt('swift_topology_file', default='etc/sahara/swift.topology', help="File with Swift topology." 
"It should contain mapping between Swift nodes and " "racks.") ] CONF = cfg.CONF CONF.register_opts(opts) def _read_swift_topology(): LOG.debug("Reading Swift nodes topology from {config}".format( config=CONF.swift_topology_file)) topology = {} try: with open(CONF.swift_topology_file) as f: for line in f: line = line.strip() if not line: continue (host, path) = line.split() topology[host] = path except IOError: LOG.warning("Unable to read Swift nodes topology from {config}" .format(config=CONF.swift_topology_file)) return {} return topology def _read_compute_topology(): LOG.debug("Reading compute nodes topology from {config}".format( config=CONF.compute_topology_file)) ctx = context.ctx() tenant_id = str(ctx.tenant_id) topology = {} try: with open(CONF.compute_topology_file) as f: for line in f: line = line.strip() if not line: continue (host, path) = line.split() # Calculating host id based on tenant id and host # using the same algorithm as in nova # see nova/api/openstack/compute/views/servers.py # def _get_host_id(instance): sha_hash = hashlib.sha224(tenant_id + host) topology[sha_hash.hexdigest()] = path except IOError: raise ex.NotFoundException( CONF.compute_topology_file, _("Unable to find file %s with compute topology")) return topology def generate_topology_map(cluster, is_node_awareness): mapping = _read_compute_topology() nova_client = nova.client() topology_mapping = {} for ng in cluster.node_groups: for i in ng.instances: # TODO(alazarev) get all servers info with one request ni = b.execute_with_retries(nova_client.servers.get, i.instance_id) hostId = ni.hostId if hostId not in mapping: raise ex.NotFoundException( i.instance_id, _("Was not able to find compute node topology for VM %s")) rack = mapping[hostId] if is_node_awareness: rack += "/" + hostId topology_mapping[i.instance_name] = rack topology_mapping[i.management_ip] = rack topology_mapping[i.internal_ip] = rack topology_mapping.update(_read_swift_topology()) return topology_mapping def vm_awareness_core_config(): c = x.load_hadoop_xml_defaults('topology/resources/core-template.xml') result = [cfg for cfg in c if cfg['value']] if not CONF.enable_hypervisor_awareness: # not leveraging 4-layer approach so override template value param = next((prop for prop in result if prop['name'] == 'net.topology.impl'), None) if param: param['value'] = 'org.apache.hadoop.net.NetworkTopology' LOG.debug("Vm awareness will add following configs in core-site " "params: {result}".format(result=result)) return result def vm_awareness_mapred_config(): c = x.load_hadoop_xml_defaults('topology/resources/mapred-template.xml') result = [cfg for cfg in c if cfg['value']] LOG.debug("Vm awareness will add following configs in map-red " "params: {result}".format(result=result)) return result def vm_awareness_all_config(): return vm_awareness_core_config() + vm_awareness_mapred_config() def is_data_locality_enabled(): return CONF.enable_data_locality ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.785891 sahara-16.0.0/sahara/utils/0000775000175000017500000000000000000000000015457 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/__init__.py0000664000175000017500000000000000000000000017556 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 
sahara-16.0.0/sahara/utils/api.py0000664000175000017500000003444000000000000016607 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import re import traceback import flask import microversion_parse from oslo_log import log as logging from oslo_middleware import request_id as oslo_req_id import six from werkzeug import datastructures from sahara.api import microversion as mv from sahara import context from sahara import exceptions as ex from sahara.i18n import _ from sahara.utils import types from sahara.utils import wsgi LOG = logging.getLogger(__name__) class Rest(flask.Blueprint): def get(self, rule, status_code=200): return self._mroute('GET', rule, status_code) def post(self, rule, status_code=202): return self._mroute('POST', rule, status_code) def post_file(self, rule, status_code=202): return self._mroute('POST', rule, status_code, file_upload=True) def put(self, rule, status_code=202): return self._mroute('PUT', rule, status_code) def put_file(self, rule, status_code=202): return self._mroute('PUT', rule, status_code, file_upload=True) def delete(self, rule, status_code=204): return self._mroute('DELETE', rule, status_code) def patch(self, rule, status_code=202): return self._mroute('PATCH', rule, status_code) def _mroute(self, methods, rule, status_code=None, **kw): if isinstance(methods, six.string_types): methods = [methods] return self.route(rule, methods=methods, status_code=status_code, **kw) def route(self, rule, **options): status = options.pop('status_code', None) file_upload = options.pop('file_upload', False) def decorator(func): endpoint = options.pop('endpoint', func.__name__) def handler(**kwargs): context.set_ctx(None) LOG.debug("Rest.route.decorator.handler, kwargs={kwargs}" .format(kwargs=kwargs)) _init_resp_type(file_upload) # update status code if status: flask.request.status_code = status kwargs.pop("tenant_id", None) req_id = flask.request.environ.get(oslo_req_id.ENV_REQUEST_ID) auth_plugin = flask.request.environ.get('keystone.token_auth') ctx = context.Context( flask.request.headers['X-User-Id'], flask.request.headers['X-Tenant-Id'], flask.request.headers['X-Auth-Token'], flask.request.headers['X-Service-Catalog'], flask.request.headers['X-User-Name'], flask.request.headers['X-Tenant-Name'], flask.request.headers['X-Roles'].split(','), auth_plugin=auth_plugin, request_id=req_id) context.set_ctx(ctx) try: if flask.request.method in ['POST', 'PUT', 'PATCH']: kwargs['data'] = request_data() return func(**kwargs) except ex.Forbidden as e: return access_denied(e) except ex.SaharaException as e: return bad_request(e) except Exception as e: return internal_error(500, 'Internal Server Error', e) f_rule = "/" + rule self.add_url_rule(rule, endpoint, handler, **options) self.add_url_rule(rule + '.json', endpoint, handler, **options) self.add_url_rule(f_rule, endpoint, handler, **options) self.add_url_rule(f_rule + '.json', endpoint, handler, **options) return func return decorator def check_microversion_header(): 
requested_version = get_requested_microversion() if not re.match(mv.VERSION_STRING_REGEX, requested_version): bad_request_microversion(requested_version) if requested_version not in mv.API_VERSIONS: not_acceptable_microversion(requested_version) def add_vary_header(response): response.headers[mv.VARY_HEADER] = mv.OPENSTACK_API_VERSION_HEADER response.headers[mv.OPENSTACK_API_VERSION_HEADER] = "{} {}".format( mv.SAHARA_SERVICE_TYPE, get_requested_microversion()) return response class RestV2(Rest): def __init__(self, *args, **kwargs): super(RestV2, self).__init__(*args, **kwargs) self.before_request(check_microversion_header) self.after_request(add_vary_header) def route(self, rule, **options): status = options.pop('status_code', None) file_upload = options.pop('file_upload', False) def decorator(func): endpoint = options.pop('endpoint', func.__name__) def handler(**kwargs): context.set_ctx(None) LOG.debug("Rest.route.decorator.handler, kwargs={kwargs}" .format(kwargs=kwargs)) _init_resp_type(file_upload) # update status code if status: flask.request.status_code = status kwargs.pop("tenant_id", None) req_id = flask.request.environ.get(oslo_req_id.ENV_REQUEST_ID) auth_plugin = flask.request.environ.get('keystone.token_auth') ctx = context.Context( flask.request.headers['X-User-Id'], flask.request.headers['X-Tenant-Id'], flask.request.headers['X-Auth-Token'], flask.request.headers['X-Service-Catalog'], flask.request.headers['X-User-Name'], flask.request.headers['X-Tenant-Name'], flask.request.headers['X-Roles'].split(','), auth_plugin=auth_plugin, request_id=req_id) context.set_ctx(ctx) try: if flask.request.method in ['POST', 'PUT', 'PATCH']: kwargs['data'] = request_data() return func(**kwargs) except ex.Forbidden as e: return access_denied(e) except ex.SaharaException as e: return bad_request(e) except Exception as e: return internal_error(500, 'Internal Server Error', e) f_rule = "/" + rule self.add_url_rule(rule, endpoint, handler, **options) self.add_url_rule(rule + '.json', endpoint, handler, **options) self.add_url_rule(f_rule, endpoint, handler, **options) self.add_url_rule(f_rule + '.json', endpoint, handler, **options) return func return decorator RT_JSON = datastructures.MIMEAccept([("application/json", 1)]) def _init_resp_type(file_upload): """Extracts response content type.""" # get content type from Accept header resp_type = flask.request.accept_mimetypes # url /foo.json if flask.request.path.endswith('.json'): resp_type = RT_JSON flask.request.resp_type = resp_type # set file upload flag flask.request.file_upload = file_upload def render(res=None, resp_type=None, status=None, name=None, **kwargs): if not res and type(res) is not types.Page: res = {} if type(res) is dict: res.update(kwargs) elif type(res) is types.Page: result = {name: [item.to_dict() for item in res]} result.update(kwargs) if res.prev or res.next or ('marker' in get_request_args()): result["markers"] = {"prev": res.prev, "next": res.next} res = result elif kwargs: # can't merge kwargs into the non-dict res abort_and_log(500, _("Non-dict and non-empty kwargs passed to render")) status_code = getattr(flask.request, 'status_code', None) if status: status_code = status if not status_code: status_code = 200 if not resp_type: resp_type = getattr(flask.request, 'resp_type', RT_JSON) if not resp_type: resp_type = RT_JSON serializer = None if "application/json" in resp_type: resp_type = RT_JSON serializer = wsgi.JSONDictSerializer() else: raise ex.InvalidDataException( _("Content type '%s' isn't supported") % resp_type) 
body = serializer.serialize(res) resp_type = str(resp_type) return flask.Response(response=body, status=status_code, mimetype=resp_type) def request_data(): if hasattr(flask.request, 'parsed_data'): return flask.request.parsed_data if (flask.request.content_length is None or not flask.request.content_length > 0): LOG.debug("Empty body provided in request") return dict() if flask.request.file_upload: return flask.request.data deserializer = None content_type = flask.request.mimetype if not content_type or content_type in RT_JSON: deserializer = wsgi.JSONDeserializer() else: raise ex.InvalidDataException( _("Content type '%s' isn't supported") % content_type) # parsed request data to avoid unwanted re-parsings parsed_data = deserializer.deserialize(flask.request.data)['body'] flask.request.parsed_data = parsed_data return flask.request.parsed_data def get_request_args(): return flask.request.args def get_requested_microversion(): requested_version = microversion_parse.get_version( flask.request.headers, mv.SAHARA_SERVICE_TYPE ) if requested_version is None: requested_version = mv.MIN_API_VERSION elif requested_version == mv.LATEST: requested_version = mv.MAX_API_VERSION return requested_version def abort_and_log(status_code, descr, exc=None): LOG.error("Request aborted with status code {code} and " "message '{message}'".format(code=status_code, message=descr)) if exc is not None: LOG.error(traceback.format_exc()) flask.abort(status_code, description=descr) def render_error_message(error_code, error_message, error_name, **msg_kwargs): message = { "error_code": error_code, "error_message": error_message, "error_name": error_name } message.update(**msg_kwargs) resp = render(message) resp.status_code = error_code return resp def not_acceptable_microversion(requested_version): message = ("Version {} is not supported by the API. " "Minimum is {} and maximum is {}.".format( requested_version, mv.MIN_API_VERSION, mv.MAX_API_VERSION )) resp = render_error_message( mv.NOT_ACCEPTABLE_STATUS_CODE, message, mv.NOT_ACCEPTABLE_STATUS_NAME, max_version=mv.MAX_API_VERSION, min_version=mv.MIN_API_VERSION ) flask.abort(resp) def bad_request_microversion(requested_version): message = ("API Version String {} is of invalid format. 
Must be of format" " MajorNum.MinorNum.").format(requested_version) resp = render_error_message( mv.BAD_REQUEST_STATUS_CODE, message, mv.BAD_REQUEST_STATUS_NAME, max_version=mv.MAX_API_VERSION, min_version=mv.MIN_API_VERSION ) flask.abort(resp) def invalid_param_error(status_code, descr, exc=None): LOG.error("Request aborted with status code {code} and " "message '{message}'".format(code=status_code, message=descr)) if exc is not None: LOG.error(traceback.format_exc()) error_code = "INVALID_PARAMS_ON_REQUEST" return render_error_message(status_code, descr, error_code) def internal_error(status_code, descr, exc=None): LOG.error("Request aborted with status code {code} and " "message '{message}'".format(code=status_code, message=descr)) if exc is not None: LOG.error(traceback.format_exc()) error_code = "INTERNAL_SERVER_ERROR" if status_code == 501: error_code = "NOT_IMPLEMENTED_ERROR" return render_error_message(status_code, descr, error_code) def bad_request(error): error_code = 400 LOG.error("Validation Error occurred: " "error_code={code}, error_message={message}, " "error_name={name}".format(code=error_code, message=error.message, name=error.code)) return render_error_message(error_code, error.message, error.code) def access_denied(error): error_code = 403 LOG.error("Access Denied: error_code={code}, error_message={message}, " "error_name={name}".format(code=error_code, message=error.message, name=error.code)) return render_error_message(error_code, error.message, error.code) def not_found(error): error_code = 404 LOG.error("Not Found exception occurred: " "error_code={code}, error_message={message}, " "error_name={name}".format(code=error_code, message=error.message, name=error.code)) return render_error_message(error_code, error.message, error.code) def to_wrapped_dict(func, id, *args, **kwargs): return render(to_wrapped_dict_no_render(func, id, *args, **kwargs)) def to_wrapped_dict_no_render(func, id, *args, **kwargs): obj = func(id, *args, **kwargs) if obj is None: e = ex.NotFoundException( {'id': id}, _('Object with %s not found')) return not_found(e) return obj.to_wrapped_dict() def _replace_hadoop_version_plugin_version(obj): dict.update(obj, {'plugin_version': obj['hadoop_version']}) dict.pop(obj, 'hadoop_version') def _replace_tenant_id_project_id(obj): dict.update(obj, {'project_id': obj['tenant_id']}) dict.pop(obj, 'tenant_id') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/api_validator.py0000664000175000017500000001316700000000000020657 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
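# NOTE: illustrative behaviour of the format checkers registered below
# (a sketch, not an exhaustive reference): a schema such as
# {"type": "string", "format": "valid_name"} accepts "my-cluster_01" but
# rejects "-bad-name" (names must start with a letter or digit), while the
# "uuid" format simply delegates to oslo.utils' uuidutils.is_uuid_like().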
import re import jsonschema from oslo_utils import uuidutils import six from sahara.service.edp.job_binaries import manager as jb_manager @jsonschema.FormatChecker.cls_checks('valid_name_hostname') def validate_name_hostname_format(entry): if not isinstance(entry, six.string_types) or not entry: # should fail type or length validation return True res = re.match(r"^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]" r"*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z]" r"[A-Za-z0-9\-]*[A-Za-z0-9])$", entry) return res is not None @jsonschema.FormatChecker.cls_checks('valid_name') def validate_name_format(entry): if not isinstance(entry, six.string_types): # should fail type validation return True res = re.match(r"^[a-zA-Z0-9][a-zA-Z0-9\-_\.]*$", entry) return res is not None @jsonschema.FormatChecker.cls_checks('valid_keypair_name') def validate_keypair_name_format(entry): if not isinstance(entry, six.string_types): # should fail type validation return True # this follows the validation put forth by nova for keypair names res = re.match(r'^[a-zA-Z0-9\-_ ]+$', entry) return res is not None @jsonschema.FormatChecker.cls_checks('valid_job_location') def validate_job_location_format(entry): if not isinstance(entry, six.string_types): # should fail type validation return True return jb_manager.JOB_BINARIES \ .get_job_binary_by_url(entry) \ .validate_job_location_format(entry) @jsonschema.FormatChecker.cls_checks('valid_tag') def validate_valid_tag_format(entry): if not isinstance(entry, six.string_types): # should fail type validation return True res = re.match(r"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-_]" r"*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9]" r"[A-Za-z0-9\-_]*[A-Za-z0-9])$", entry) return res is not None @jsonschema.FormatChecker.cls_checks('uuid') def validate_uuid_format(entry): if not isinstance(entry, six.string_types): # should fail type validation return True return uuidutils.is_uuid_like(entry) @jsonschema.FormatChecker.cls_checks('posix_path') def validate_posix_path(entry): if not isinstance(entry, six.string_types): # should fail type validation return True res = re.match("^(/([A-Z]|[a-z]|[0-9]|\-|_)+)+$", entry) return res is not None class ConfigTypeMeta(type): def __instancecheck__(cls, instance): # configs should be dict if not isinstance(instance, dict): return False # check dict content for applicable_target, configs in six.iteritems(instance): # upper-level dict keys (applicable targets) should be strings if not isinstance(applicable_target, six.string_types): return False # upper-level dict values should be dicts if not isinstance(configs, dict): return False # check internal dict content for config_name, config_value in six.iteritems(configs): # internal dict keys should be strings if not isinstance(config_name, six.string_types): return False # internal dict values should be strings or integers or bools if not isinstance(config_value, (six.string_types, six.integer_types)): return False return True class SimpleConfigTypeMeta(type): def __instancecheck__(cls, instance): # configs should be dict if not isinstance(instance, dict): return False # check dict content for conf_name, conf_value in six.iteritems(instance): # keys should be strings, values should be int, string or bool if not isinstance(conf_name, six.string_types): return False if not isinstance(conf_value, (six.string_types, six.integer_types)): return False return True @six.add_metaclass(ConfigTypeMeta) class ConfigsType(dict): pass @six.add_metaclass(SimpleConfigTypeMeta) class SimpleConfigsType(dict): pass class FlavorTypeMeta(type): def 
__instancecheck__(cls, instance): try: int(instance) except (ValueError, TypeError): return (isinstance(instance, six.string_types) and uuidutils.is_uuid_like(instance)) return (isinstance(instance, six.integer_types + six.string_types) and type(instance) != bool) @six.add_metaclass(FlavorTypeMeta) class FlavorType(object): pass class ApiValidator(jsonschema.Draft4Validator): def __init__(self, schema): format_checker = jsonschema.FormatChecker() super(ApiValidator, self).__init__( schema, format_checker=format_checker, types={ "configs": ConfigsType, "flavor": FlavorType, "simple_config": SimpleConfigsType, }) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/cluster.py0000664000175000017500000001460700000000000017522 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Intel Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import socket from keystoneauth1 import exceptions as keystone_ex from oslo_config import cfg from oslo_log import log as logging from six.moves.urllib import parse from sahara import conductor as c from sahara import context from sahara import exceptions as e from sahara.utils.notification import sender from sahara.utils.openstack import base as auth_base conductor = c.API LOG = logging.getLogger(__name__) CONF = cfg.CONF # cluster status CLUSTER_STATUS_VALIDATING = "Validating" CLUSTER_STATUS_INFRAUPDATING = "InfraUpdating" CLUSTER_STATUS_SPAWNING = "Spawning" CLUSTER_STATUS_WAITING = "Waiting" CLUSTER_STATUS_PREPARING = "Preparing" CLUSTER_STATUS_CONFIGURING = "Configuring" CLUSTER_STATUS_STARTING = "Starting" CLUSTER_STATUS_ACTIVE = "Active" CLUSTER_STATUS_DECOMMISSIONING = "Decommissioning" CLUSTER_STATUS_ERROR = "Error" CLUSTER_STATUS_DELETING = "Deleting" CLUSTER_STATUS_AWAITINGTERMINATION = "AwaitingTermination" # cluster status -- Instances CLUSTER_STATUS_DELETING_INSTANCES = "Deleting Instances" CLUSTER_STATUS_ADDING_INSTANCES = "Adding Instances" # Scaling status CLUSTER_STATUS_SCALING = "Scaling" CLUSTER_STATUS_SCALING_SPAWNING = (CLUSTER_STATUS_SCALING + ": " + CLUSTER_STATUS_SPAWNING) CLUSTER_STATUS_SCALING_WAITING = (CLUSTER_STATUS_SCALING + ": " + CLUSTER_STATUS_WAITING) CLUSTER_STATUS_SCALING_PREPARING = (CLUSTER_STATUS_SCALING + ": " + CLUSTER_STATUS_PREPARING) # Rollback status CLUSTER_STATUS_ROLLBACK = "Rollback" CLUSTER_STATUS_ROLLBACK_SPAWNING = (CLUSTER_STATUS_ROLLBACK + ": " + CLUSTER_STATUS_SPAWNING) CLUSTER_STATUS_ROLLBACK_WAITING = (CLUSTER_STATUS_ROLLBACK + ": " + CLUSTER_STATUS_WAITING) CLUSTER_STATUS_ROLLBACK__PREPARING = (CLUSTER_STATUS_ROLLBACK + ": " + CLUSTER_STATUS_PREPARING) def change_cluster_status_description(cluster, status_description): try: ctx = context.ctx() return conductor.cluster_update( ctx, cluster, {'status_description': status_description}) except e.NotFoundException: return None def change_cluster_status(cluster, status, status_description=None): ctx = context.ctx() # Update cluster status. 
Race conditions with deletion are still possible, # but this reduces probability at least. cluster = conductor.cluster_get(ctx, cluster) if cluster else None if status_description is not None: change_cluster_status_description(cluster, status_description) # 'Deleting' is final and can't be changed if cluster is None or cluster.status == CLUSTER_STATUS_DELETING: return cluster update_dict = {"status": status} cluster = conductor.cluster_update(ctx, cluster, update_dict) conductor.cluster_provision_progress_update(ctx, cluster.id) LOG.info("Cluster status has been changed. New status=" "{status}".format(status=cluster.status)) sender.status_notify(cluster.id, cluster.name, cluster.status, "update") return cluster def count_instances(cluster): return sum([node_group.count for node_group in cluster.node_groups]) def check_cluster_exists(cluster): ctx = context.ctx() # check if cluster still exists (it might have been removed) cluster = conductor.cluster_get(ctx, cluster) return cluster is not None def get_instances(cluster, instances_ids=None): inst_map = {} for node_group in cluster.node_groups: for instance in node_group.instances: inst_map[instance.id] = instance if instances_ids is not None: return [inst_map[id] for id in instances_ids] else: return [v for v in inst_map.values()] def clean_cluster_from_empty_ng(cluster): ctx = context.ctx() for ng in cluster.node_groups: if ng.count == 0: conductor.node_group_remove(ctx, ng) def etc_hosts_entry_for_service(service): result = "" try: hostname = parse.urlparse( auth_base.url_for(service_type=service, endpoint_type="publicURL")).hostname except keystone_ex.EndpointNotFound: LOG.debug("Endpoint not found for service: '{}'".format(service)) return result overridden_ip = ( getattr(CONF, "%s_ip_accessible" % service.replace('-', '_'), None) ) if overridden_ip is not None: return "%s %s\n" % (overridden_ip, hostname) try: result = "%s %s\n" % (socket.gethostbyname(hostname), hostname) except socket.gaierror: LOG.warning("Failed to resolve hostname of service: '{}'" .format(service)) result = "# Failed to resolve {} during deployment\n".format(hostname) return result def _etc_hosts_for_services(hosts): # add alias for keystone and swift for service in ["identity", "object-store"]: hosts += etc_hosts_entry_for_service(service) return hosts def _etc_hosts_for_instances(hosts, cluster): for node_group in cluster.node_groups: for instance in node_group.instances: hosts += "%s %s %s\n" % (instance.internal_ip, instance.fqdn(), instance.hostname()) return hosts def generate_etc_hosts(cluster): hosts = "127.0.0.1 localhost\n" if not cluster.use_designate_feature(): hosts = _etc_hosts_for_instances(hosts, cluster) hosts = _etc_hosts_for_services(hosts) return hosts def generate_resolv_conf_diff(curr_resolv_conf): # returns string that contains nameservers # which are lacked in the 'curr_resolve_conf' resolv_conf = "" for ns in CONF.nameservers: if ns not in curr_resolv_conf: resolv_conf += "nameserver {}\n".format(ns) return resolv_conf ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/cluster_progress_ops.py0000664000175000017500000001352300000000000022323 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import functools from oslo_config import cfg from oslo_utils import excutils from oslo_utils import timeutils import six from sahara import conductor as c from sahara.conductor import resource from sahara import context from sahara.utils import cluster as cluster_utils conductor = c.API CONF = cfg.CONF event_log_opts = [ cfg.BoolOpt('disable_event_log', default=False, help="Disables event log feature.") ] CONF.register_opts(event_log_opts) def add_successful_event(instance): if CONF.disable_event_log: return cluster_id = instance.cluster_id step_id = get_current_provisioning_step(cluster_id) if step_id: conductor.cluster_event_add(context.ctx(), step_id, { 'successful': True, 'node_group_id': instance.node_group_id, 'instance_id': instance.instance_id, 'instance_name': instance.instance_name, 'event_info': None, }) def add_fail_event(instance, exception): if CONF.disable_event_log: return cluster_id = instance.cluster_id step_id = get_current_provisioning_step(cluster_id) event_info = six.text_type(exception) if step_id: conductor.cluster_event_add(context.ctx(), step_id, { 'successful': False, 'node_group_id': instance.node_group_id, 'instance_id': instance.instance_id, 'instance_name': instance.instance_name, 'event_info': event_info, }) def add_provisioning_step(cluster_id, step_name, total): if (CONF.disable_event_log or not cluster_utils.check_cluster_exists(cluster_id)): return prev_step = get_current_provisioning_step(cluster_id) if prev_step: conductor.cluster_provision_step_update(context.ctx(), prev_step) step_type = context.ctx().current_instance_info.step_type new_step = conductor.cluster_provision_step_add( context.ctx(), cluster_id, { 'step_name': step_name, 'step_type': step_type, 'total': total, 'started_at': timeutils.utcnow(), }) context.current().current_instance_info.step_id = new_step return new_step def get_current_provisioning_step(cluster_id): if (CONF.disable_event_log or not cluster_utils.check_cluster_exists(cluster_id)): return None current_instance_info = context.ctx().current_instance_info return current_instance_info.step_id def event_wrapper(mark_successful_on_exit, **spec): """"General event-log wrapper :param mark_successful_on_exit: should we send success event after execution of function :param spec: extra specification :parameter step: provisioning step name (only for provisioning steps with only one event) :parameter param: tuple (name, pos) with parameter specification, where 'name' is the name of the parameter of function, 'pos' is the position of the parameter of function. This parameter is used to extract info about Instance or Cluster. 
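    Illustrative usage (a sketch only; the step name, method name and
    argument position are invented for the example and assume this module
    is imported as ``cpo``):

        @cpo.event_wrapper(True, step="Wait for instance accessibility",
                           param=('instance', 1))
        def wait_until_accessible(self, instance):
            ...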
""" def decorator(func): @functools.wraps(func) def handler(*args, **kwargs): if CONF.disable_event_log: return func(*args, **kwargs) step_name = spec.get('step', None) instance = _find_in_args(spec, *args, **kwargs) cluster_id = instance.cluster_id if not cluster_utils.check_cluster_exists(cluster_id): return func(*args, **kwargs) if step_name: # It's single process, let's add provisioning step here add_provisioning_step(cluster_id, step_name, 1) try: value = func(*args, **kwargs) except Exception as e: with excutils.save_and_reraise_exception(): add_fail_event(instance, e) if mark_successful_on_exit: add_successful_event(instance) return value return handler return decorator def _get_info_from_instance(arg): if isinstance(arg, resource.InstanceResource): return arg return None def _get_info_from_cluster(arg): if isinstance(arg, resource.ClusterResource): return context.InstanceInfo(arg.id) return None def _get_event_info(arg): try: return arg.get_event_info() except AttributeError: return None def _get_info_from_obj(arg): functions = [_get_info_from_instance, _get_info_from_cluster, _get_event_info] for func in functions: value = func(arg) if value: return value return None def _find_in_args(spec, *args, **kwargs): param_values = spec.get('param', None) if param_values: p_name, p_pos = param_values obj = kwargs.get(p_name, None) if obj: return _get_info_from_obj(obj) return _get_info_from_obj(args[p_pos]) # If param is not specified, let's search instance in args for arg in args: val = _get_info_from_instance(arg) if val: return val for arg in kwargs.values(): val = _get_info_from_instance(arg) if val: return val # If instance not found in args, let's get instance info from context return context.ctx().current_instance_info ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/configs.py0000664000175000017500000000204400000000000017461 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. def merge_configs(*configs): """Merge configs in special format. It supports merging of configs in the following format: applicable_target -> config_name -> config_value """ result = {} for config in configs: if config: for a_target in config: if a_target not in result or not result[a_target]: result[a_target] = {} result[a_target].update(config[a_target]) return result ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/crypto.py0000664000175000017500000000444500000000000017360 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os from oslo_concurrency import processutils import paramiko import six from sahara import exceptions as ex from sahara.i18n import _ from sahara.utils import tempfiles def to_paramiko_private_key(pkey): """Convert private key (str) to paramiko-specific RSAKey object.""" return paramiko.RSAKey(file_obj=six.StringIO(pkey)) def generate_key_pair(key_length=2048): """Create RSA key pair with specified number of bits in key. Returns tuple of private and public keys. """ with tempfiles.tempdir() as tmpdir: keyfile = os.path.join(tmpdir, 'tempkey') # The key is generated in the old PEM format, instead of the native # format of OpenSSH >=6.5, because paramiko does not support it: # https://github.com/paramiko/paramiko/issues/602 args = [ 'ssh-keygen', '-q', # quiet '-N', '', # w/o passphrase '-m', 'PEM', # old PEM format '-t', 'rsa', # create key of rsa type '-f', keyfile, # filename of the key file '-C', 'Generated-by-Sahara' # key comment ] if key_length is not None: args.extend(['-b', key_length]) processutils.execute(*args) if not os.path.exists(keyfile): raise ex.SystemError(_("Private key file hasn't been created")) with open(keyfile) as keyfile_fd: private_key = keyfile_fd.read() public_key_path = keyfile + '.pub' if not os.path.exists(public_key_path): raise ex.SystemError(_("Public key file hasn't been created")) with open(public_key_path) as public_key_path_fd: public_key = public_key_path_fd.read() return private_key, public_key ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/edp.py0000664000175000017500000001110300000000000016575 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_utils import uuidutils from sahara.utils import files # job execution status JOB_STATUS_DONEWITHERROR = 'DONEWITHERROR' JOB_STATUS_FAILED = 'FAILED' JOB_STATUS_KILLED = 'KILLED' JOB_STATUS_PENDING = 'PENDING' JOB_STATUS_READYTORUN = 'READYTORUN' JOB_STATUS_RUNNING = 'RUNNING' JOB_STATUS_SUCCEEDED = 'SUCCEEDED' JOB_STATUS_TOBEKILLED = 'TOBEKILLED' JOB_STATUS_TOBESUSPENDED = 'TOBESUSPENDED' JOB_STATUS_PREP = 'PREP' JOB_STATUS_PREPSUSPENDED = 'PREPSUSPENDED' JOB_STATUS_SUSPENDED = 'SUSPENDED' JOB_STATUS_SUSPEND_FAILED = 'SUSPENDFAILED' # statuses for suspended jobs JOB_STATUSES_SUSPENDIBLE = [ JOB_STATUS_PREP, JOB_STATUS_RUNNING ] # statuses for terminated jobs JOB_STATUSES_TERMINATED = [ JOB_STATUS_DONEWITHERROR, JOB_STATUS_FAILED, JOB_STATUS_KILLED, JOB_STATUS_SUCCEEDED, JOB_STATUS_SUSPEND_FAILED ] # job type separator character JOB_TYPE_SEP = '.' 
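# NOTE: JOB_TYPE_SEP separates a job type from its subtype; for example,
# split_job_type() below turns the composite type "MapReduce.Streaming"
# into ["MapReduce", "Streaming"], while a plain "Pig" yields ["Pig", ""].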
# job sub types available JOB_SUBTYPE_STREAMING = 'Streaming' JOB_SUBTYPE_NONE = '' # job types available JOB_TYPE_HIVE = 'Hive' JOB_TYPE_JAVA = 'Java' JOB_TYPE_MAPREDUCE = 'MapReduce' JOB_TYPE_SPARK = 'Spark' JOB_TYPE_STORM = 'Storm' JOB_TYPE_PYLEUS = 'Storm.Pyleus' JOB_TYPE_MAPREDUCE_STREAMING = (JOB_TYPE_MAPREDUCE + JOB_TYPE_SEP + JOB_SUBTYPE_STREAMING) JOB_TYPE_PIG = 'Pig' JOB_TYPE_SHELL = 'Shell' # job type groupings available JOB_TYPES_ALL = [ JOB_TYPE_HIVE, JOB_TYPE_JAVA, JOB_TYPE_MAPREDUCE, JOB_TYPE_MAPREDUCE_STREAMING, JOB_TYPE_PIG, JOB_TYPE_SHELL, JOB_TYPE_SPARK, JOB_TYPE_STORM, JOB_TYPE_PYLEUS ] JOB_TYPES_ACCEPTABLE_CONFIGS = { JOB_TYPE_HIVE: {"configs", "params"}, JOB_TYPE_PIG: {"configs", "params", "args"}, JOB_TYPE_MAPREDUCE: {"configs"}, JOB_TYPE_MAPREDUCE_STREAMING: {"configs"}, JOB_TYPE_JAVA: {"configs", "args"}, JOB_TYPE_SHELL: {"configs", "params", "args"}, JOB_TYPE_SPARK: {"configs", "args"}, JOB_TYPE_STORM: {"args"}, JOB_TYPE_PYLEUS: {} } # job actions JOB_ACTION_SUSPEND = 'suspend' JOB_ACTION_CANCEL = 'cancel' JOB_ACTION_TYPES_ACCEPTABLE = [ JOB_ACTION_SUSPEND, JOB_ACTION_CANCEL ] ADAPT_FOR_OOZIE = 'edp.java.adapt_for_oozie' SPARK_DRIVER_CLASSPATH = 'edp.spark.driver.classpath' ADAPT_SPARK_FOR_SWIFT = 'edp.spark.adapt_for_swift' def split_job_type(job_type): '''Split a job type string into a type and subtype The split is done on the first '.'. A subtype will always be returned, even if it is empty. ''' type_info = job_type.split(JOB_TYPE_SEP, 1) if len(type_info) == 1: type_info.append('') return type_info def compare_job_type(job_type, *args, **kwargs): '''Compare a job type against a list of job types :param job_type: The job type being compared :param *args: A list of types to compare against :param strict: Passed as a keyword arg. Default is False. If strict is False, job_type will be compared with and without its subtype indicator. :returns: True if job_type is present in the list, False otherwise ''' strict = kwargs.get('strict', False) res = job_type in args if res or strict or JOB_TYPE_SEP not in job_type: return res jtype, jsubtype = split_job_type(job_type) return jtype in args def get_hive_shared_conf_path(hdfs_user): return "/user/%s/conf/hive-site.xml" % hdfs_user def is_adapt_for_oozie_enabled(configs): return configs.get(ADAPT_FOR_OOZIE, False) def is_adapt_spark_for_swift_enabled(configs): return configs.get(ADAPT_SPARK_FOR_SWIFT, False) def spark_driver_classpath(configs): # Return None in case when you need to use default value return configs.get(SPARK_DRIVER_CLASSPATH) def get_builtin_binaries(job, configs): if job.type == JOB_TYPE_JAVA: if is_adapt_for_oozie_enabled(configs): path = 'service/edp/resources/edp-main-wrapper.jar' name = 'builtin-%s.jar' % uuidutils.generate_uuid() return [{'raw': files.get_file_binary(path), 'name': name}] return [] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/files.py0000664000175000017500000000224600000000000017137 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. from os import path import pkg_resources as pkg from sahara import version def get_file_text(file_name, package='sahara'): full_name = pkg.resource_filename( package, file_name) return open(full_name).read() def get_file_binary(file_name): full_name = pkg.resource_filename( version.version_info.package, file_name) return open(full_name, "rb").read() def try_get_file_text(file_name, package='sahara'): full_name = pkg.resource_filename( package, file_name) return ( open(full_name, "rb").read() if path.isfile(full_name) else False) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/general.py0000664000175000017500000000404300000000000017447 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import re import six NATURAL_SORT_RE = re.compile('([0-9]+)') def find_dict(iterable, **rules): """Search for dict in iterable of dicts using specified key-value rules.""" for item in iterable: # assert all key-value pairs from rules dict ok = True for k, v in six.iteritems(rules): ok = ok and k in item and item[k] == v if ok: return item return None def find(lst, **kwargs): for obj in lst: match = True for attr, value in kwargs.items(): if getattr(obj, attr) != value: match = False if match: return obj return None def get_by_id(lst, id): for obj in lst: if obj.id == id: return obj return None # Taken from http://stackoverflow.com/questions/4836710/does- # python-have-a-built-in-function-for-string-natural-sort def natural_sort_key(s): return [int(text) if text.isdigit() else text.lower() for text in re.split(NATURAL_SORT_RE, s)] def generate_instance_name(cluster_name, node_group_name, index): return ("%s-%s-%03d" % (cluster_name, node_group_name, index)).lower() def generate_auto_security_group_name(node_group): return ("%s-%s-%s" % (node_group.cluster.name, node_group.name, node_group.id[:8])).lower() def generate_aa_group_name(cluster_name, server_group_index): return ("%s-aa-group-%d" % (cluster_name, server_group_index)).lower() ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.785891 sahara-16.0.0/sahara/utils/hacking/0000775000175000017500000000000000000000000017063 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/hacking/__init__.py0000664000175000017500000000000000000000000021162 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/hacking/checks.py0000664000175000017500000000750600000000000020705 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import pycodestyle import re import tokenize from hacking import core RE_OSLO_IMPORTS = (re.compile(r"(((from)|(import))\s+oslo\.)"), re.compile(r"(from\s+oslo\s+import)")) RE_DICT_CONSTRUCTOR_WITH_LIST_COPY = re.compile(r".*\bdict\((\[)?(\(|\[)") RE_USE_JSONUTILS_INVALID_LINE = re.compile(r"(import\s+json)") RE_USE_JSONUTILS_VALID_LINE = re.compile(r"(import\s+jsonschema)") RE_MUTABLE_DEFAULT_ARGS = re.compile(r"^\s*def .+\((.+=\{\}|.+=\[\])") def _starts_with_any(line, *prefixes): for prefix in prefixes: if line.startswith(prefix): return True return False def _any_in(line, *sublines): for subline in sublines: if subline in line: return True return False @core.flake8ext def import_db_only_in_conductor(logical_line, filename): """Check that db calls are only in conductor, plugins module and in tests. S361 """ if _any_in(filename, "sahara/conductor", "sahara/plugins", "sahara/tests", "sahara/db"): return if _starts_with_any(logical_line, "from sahara import db", "from sahara.db", "import sahara.db"): yield (0, "S361: sahara.db import only allowed in " "sahara/conductor/*") @core.flake8ext def hacking_no_author_attr(logical_line, tokens): """__author__ should not be used. S362: __author__ = slukjanov """ for token_type, text, start_index, _, _ in tokens: if token_type == tokenize.NAME and text == "__author__": yield (start_index[1], "S362: __author__ should not be used") @core.flake8ext def check_oslo_namespace_imports(logical_line): """Check to prevent old oslo namespace usage. S363 """ if re.match(RE_OSLO_IMPORTS[0], logical_line): yield(0, "S363: '%s' must be used instead of '%s'." % ( logical_line.replace('oslo.', 'oslo_'), logical_line)) if re.match(RE_OSLO_IMPORTS[1], logical_line): yield(0, "S363: '%s' must be used instead of '%s'" % ( 'import oslo_%s' % logical_line.split()[-1], logical_line)) @core.flake8ext def dict_constructor_with_list_copy(logical_line): """Check to prevent dict constructor with a sequence of key-value pairs. S368 """ if RE_DICT_CONSTRUCTOR_WITH_LIST_COPY.match(logical_line): yield (0, 'S368: Must use a dict comprehension instead of a dict ' 'constructor with a sequence of key-value pairs.') @core.flake8ext def use_jsonutils(logical_line, filename): """Check to prevent importing json in sahara code. S375 """ if pycodestyle.noqa(logical_line): return if (RE_USE_JSONUTILS_INVALID_LINE.match(logical_line) and not RE_USE_JSONUTILS_VALID_LINE.match(logical_line)): yield(0, "S375: Use jsonutils from oslo_serialization instead" " of json") @core.flake8ext def no_mutable_default_args(logical_line): """Check to prevent mutable default argument in sahara code. S360 """ msg = "S360: Method's default argument shouldn't be mutable!" 
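    # Illustrative matches (a sketch): signatures such as
    # "def build(self, cache={}):" or "def run(args=[]):" trigger S360,
    # whereas an immutable default like "def run(args=None):" does not.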
if RE_MUTABLE_DEFAULT_ARGS.match(logical_line): yield (0, msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/hacking/commit_message.py0000664000175000017500000000615200000000000022435 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re import subprocess # nosec from hacking import core class GitCheck(core.GlobalCheck): """Base-class for Git related checks.""" def _get_commit_title(self): # Check if we're inside a git checkout try: subp = subprocess.Popen( # nosec ['git', 'rev-parse', '--show-toplevel'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) gitdir = subp.communicate()[0].rstrip() except OSError: # "git" was not found return None if not os.path.exists(gitdir): return None # Get title of most recent commit subp = subprocess.Popen( # nosec ['git', 'log', '--no-merges', '--pretty=%s', '-1'], stdout=subprocess.PIPE) title = subp.communicate()[0] if subp.returncode: raise Exception("git log failed with code %s" % subp.returncode) return title.decode('utf-8') class OnceGitCheckCommitTitleBug(GitCheck): """Check git commit messages for bugs. OpenStack HACKING recommends not referencing a bug or blueprint in first line. It should provide an accurate description of the change S364 """ name = "GitCheckCommitTitleBug" # From https://github.com/openstack/openstack-ci-puppet # /blob/master/modules/gerrit/manifests/init.pp#L74 # Changeid|bug|blueprint GIT_REGEX = re.compile( r'(I[0-9a-f]{8,40})|' '([Bb]ug|[Ll][Pp])[\s\#:]*(\d+)|' '([Bb]lue[Pp]rint|[Bb][Pp])[\s\#:]*([A-Za-z0-9\\-]+)') def run_once(self): title = self._get_commit_title() # NOTE(jogo) if match regex but over 3 words, acceptable title if (title and self.GIT_REGEX.search(title) is not None and len(title.split()) <= 3): return (1, 0, "S364: git commit title ('%s') should provide an accurate " "description of the change, not just a reference to a bug " "or blueprint" % title.strip(), self.name) class OnceGitCheckCommitTitleLength(GitCheck): """Check git commit message length. HACKING recommends commit titles 50 chars or less, but enforces a 72 character limit S365 Title limited to 72 chars """ name = "GitCheckCommitTitleLength" def run_once(self): title = self._get_commit_title() if title and len(title) > 72: return ( 1, 0, "S365: git commit title ('%s') should be under 50 chars" % title.strip(), self.name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/hacking/logging_checks.py0000664000175000017500000000427700000000000022415 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re from hacking import core ALL_LOG_LEVELS = "info|exception|warning|critical|error|debug" RE_ACCEPTED_LOG_LEVELS = re.compile( r"(.)*LOG\.(%(levels)s)\(" % {'levels': ALL_LOG_LEVELS}) # Since _Lx() have been removed, we just need to check _() RE_TRANSLATED_LOG = re.compile( r"(.)*LOG\.(%(levels)s)\(\s*_\(" % {'levels': ALL_LOG_LEVELS}) @core.flake8ext def no_translate_logs(logical_line, filename): """Check for 'LOG.*(_(' Translators don't provide translations for log messages, and operators asked not to translate them. * This check assumes that 'LOG' is a logger. * Use filename so we can start enforcing this in specific folders instead of needing to do so all at once. S373 """ msg = "S373 Don't translate logs" if RE_TRANSLATED_LOG.match(logical_line): yield (0, msg) @core.flake8ext def accepted_log_levels(logical_line, filename): """In Sahara we use only 5 log levels. This check is needed because we don't want new contributors to use deprecated log levels. S374 """ # NOTE(Kezar): sahara/tests included because we don't require translations # in tests. sahara/db/templates provide separate cli interface so we don't # want to translate it. ignore_dirs = ["sahara/db/templates", "sahara/tests"] for directory in ignore_dirs: if directory in filename: return msg = ("S374 You used deprecated log level. Accepted log levels are " "%(levels)s" % {'levels': ALL_LOG_LEVELS}) if logical_line.startswith("LOG."): if not RE_ACCEPTED_LOG_LEVELS.search(logical_line): yield(0, msg) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/network.py0000664000175000017500000000310100000000000017515 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Red Hat Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg CONF = cfg.CONF def has_floating_ip(instance): # Alternatively in each of these cases # we could use the nova client to look up the # ips for the instance and check the attributes # to ensure that the management_ip is a floating # ip, but a simple comparison with the internal_ip # corresponds with the logic in # sahara.service.networks.init_instances_ips if not instance.node_group.floating_ip_pool: return False # in the neutron case comparing ips is an extra simple check ... 
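    # For example (purely illustrative addresses): internal_ip=10.0.0.5
    # with management_ip=172.24.4.10 is treated as having a floating IP,
    # while management_ip=10.0.0.5 (equal to internal_ip) is not.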
# maybe allocation of a floating ip failed for some reason # (Alternatively in each of these cases # we could use the nova client to look up the # ips for the instance and check the attributes # to ensure that the management_ip is a floating # ip, but a simple comparison with the internal_ip # corresponds with the logic in # sahara.service.networks.init_instances_ips) return instance.management_ip != instance.internal_ip ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.785891 sahara-16.0.0/sahara/utils/notification/0000775000175000017500000000000000000000000020145 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/notification/__init__.py0000664000175000017500000000000000000000000022244 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/notification/sender.py0000664000175000017500000000574700000000000022014 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_log import log as logging from sahara import context from sahara.utils import rpc as messaging LOG = logging.getLogger(__name__) SERVICE = 'sahara' CLUSTER_EVENT_TEMPLATE = "sahara.cluster.%s" HEALTH_EVENT_TYPE = CLUSTER_EVENT_TEMPLATE % "health" notifier_opts = [ cfg.StrOpt('level', default='INFO', deprecated_name='notification_level', deprecated_group='DEFAULT', help='Notification level for outgoing notifications'), cfg.StrOpt('publisher_id', deprecated_name='notification_publisher_id', deprecated_group='DEFAULT', help='Identifier of the publisher') ] notifier_opts_group = 'oslo_messaging_notifications' CONF = cfg.CONF CONF.register_opts(notifier_opts, group=notifier_opts_group) def _get_publisher(): publisher_id = CONF.oslo_messaging_notifications.publisher_id if publisher_id is None: publisher_id = SERVICE return publisher_id def _notify(event_type, body): LOG.debug("Notification about cluster is going to be sent. 
Notification " "type={type}".format(type=event_type)) ctx = context.ctx() level = CONF.oslo_messaging_notifications.level body.update({'project_id': ctx.tenant_id, 'user_id': ctx.user_id}) client = messaging.get_notifier(_get_publisher()) method = getattr(client, level.lower()) method(ctx, event_type, body) def _health_notification_body(cluster, health_check): verification = cluster.verification return { 'cluster_id': cluster.id, 'cluster_name': cluster.name, 'verification_id': verification['id'], 'health_check_status': health_check['status'], 'health_check_name': health_check['name'], 'health_check_description': health_check['description'], 'created_at': health_check['created_at'], 'updated_at': health_check['updated_at'] } def status_notify(cluster_id, cluster_name, cluster_status, ev_type): """Sends notification about creating/updating/deleting cluster.""" _notify(CLUSTER_EVENT_TEMPLATE % ev_type, { 'cluster_id': cluster_id, 'cluster_name': cluster_name, 'cluster_status': cluster_status}) def health_notify(cluster, health_check): """Sends notification about current cluster health.""" _notify(HEALTH_EVENT_TYPE, _health_notification_body(cluster, health_check)) ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.789891 sahara-16.0.0/sahara/utils/openstack/0000775000175000017500000000000000000000000017446 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/openstack/__init__.py0000664000175000017500000000000000000000000021545 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/openstack/base.py0000664000175000017500000001025600000000000020736 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
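# NOTE: a small usage sketch for the helpers defined below (the argument
# values are illustrative): url_for(service_type='orchestration') resolves
# an endpoint from the current context's service catalog, and
# execute_with_retries(client().volumes.get, volume_id) retries the call
# whenever the raised error carries a code listed in ERRORS_TO_RETRY.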
import re from keystoneauth1.access import service_catalog as keystone_service_catalog from keystoneauth1 import exceptions as keystone_ex from oslo_config import cfg from oslo_log import log as logging from oslo_serialization import jsonutils as json from six.moves.urllib import parse as urlparse from sahara import context from sahara import exceptions as ex LOG = logging.getLogger(__name__) # List of the errors, that can be retried ERRORS_TO_RETRY = [408, 413, 429, 500, 502, 503, 504] opts = [ cfg.IntOpt('retries_number', default=5, help='Number of times to retry the request to client before ' 'failing'), cfg.IntOpt('retry_after', default=10, help='Time between the retries to client (in seconds).') ] retries = cfg.OptGroup(name='retries', title='OpenStack clients calls retries') CONF = cfg.CONF CONF.register_group(retries) CONF.register_opts(opts, group=retries) def url_for(service_catalog=None, service_type='identity', endpoint_type="internalURL"): if not service_catalog: service_catalog = context.current().service_catalog try: return keystone_service_catalog.ServiceCatalogV2( json.loads(service_catalog)).url_for( service_type=service_type, interface=endpoint_type, region_name=CONF.os_region_name) except keystone_ex.EndpointNotFound: return keystone_service_catalog.ServiceCatalogV3( json.loads(service_catalog)).url_for( service_type=service_type, interface=endpoint_type, region_name=CONF.os_region_name) def prepare_auth_url(auth_url, version): info = urlparse.urlparse(auth_url) url_path = info.path.rstrip("/") # replacing current api version to empty string url_path = re.sub('/(v3/auth|v3|v2\.0)', '', url_path) url_path = (url_path + "/" + version).lstrip("/") return "%s://%s/%s" % (info[:2] + (url_path,)) def retrieve_auth_url(endpoint_type="internalURL", version=None): if not version: version = 'v3' if CONF.use_identity_api_v3 else 'v2.0' ctx = context.current() if ctx.service_catalog: auth_url = url_for(ctx.service_catalog, 'identity', endpoint_type) else: auth_url = CONF.trustee.auth_url return prepare_auth_url(auth_url, version) def execute_with_retries(method, *args, **kwargs): attempts = CONF.retries.retries_number + 1 while attempts > 0: try: return method(*args, **kwargs) except Exception as e: error_code = getattr(e, 'http_status', None) or getattr( e, 'status_code', None) or getattr(e, 'code', None) if error_code in ERRORS_TO_RETRY: LOG.warning('Occasional error occurred during "{method}" ' 'execution: {error_msg} ({error_code}). ' 'Operation will be retried.'.format( method=method.__name__, error_msg=e, error_code=error_code)) attempts -= 1 retry_after = getattr(e, 'retry_after', 0) context.sleep(max(retry_after, CONF.retries.retry_after)) else: LOG.debug('Permanent error occurred during "{method}" ' 'execution: {error_msg}.'.format( method=method.__name__, error_msg=e)) raise e else: attempts = CONF.retries.retries_number raise ex.MaxRetriesExceeded(attempts, method.__name__) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/openstack/cinder.py0000664000175000017500000000570300000000000021271 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Copyright (c) 2013 Mirantis Inc. # Copyright (c) 2014 Adrien Vergé # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from cinderclient.v3 import client as cinder_client_v3 from keystoneauth1 import exceptions as keystone_exceptions from oslo_config import cfg from oslo_log import log as logging from sahara import context from sahara.service import sessions from sahara.utils.openstack import base from sahara.utils.openstack import keystone LOG = logging.getLogger(__name__) opts = [ cfg.IntOpt('api_version', default=3, help='Version of the Cinder API to use.', deprecated_name='cinder_api_version'), cfg.BoolOpt('api_insecure', default=False, help='Allow to perform insecure SSL requests to cinder.'), cfg.StrOpt('ca_file', help='Location of ca certificates file to use for cinder ' 'client requests.'), cfg.StrOpt("endpoint_type", default="internalURL", help="Endpoint type for cinder client requests") ] cinder_group = cfg.OptGroup(name='cinder', title='Cinder client options') CONF = cfg.CONF CONF.register_group(cinder_group) CONF.register_opts(opts, group=cinder_group) def validate_config(): if CONF.cinder.api_version != 3: LOG.warning('Unsupported Cinder API version: {bad}. Please set a ' 'correct value for cinder.api_version in your ' 'sahara.conf file (currently supported versions are: ' '{supported}). Falling back to Cinder API version 3.' .format(bad=CONF.cinder.api_version, supported=[3])) CONF.set_override('api_version', 3, group='cinder') def client(): session = sessions.cache().get_session(sessions.SESSION_TYPE_CINDER) auth = keystone.auth() cinder = cinder_client_v3.Client( session=session, auth=auth, endpoint_type=CONF.cinder.endpoint_type, region_name=CONF.os_region_name) return cinder def check_cinder_exists(): service_type = 'volumev3' try: base.url_for(context.current().service_catalog, service_type, endpoint_type=CONF.cinder.endpoint_type) return True except keystone_exceptions.EndpointNotFound: return False def get_volume(volume_id): return base.execute_with_retries(client().volumes.get, volume_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/openstack/glance.py0000664000175000017500000000313000000000000021246 0ustar00zuulzuul00000000000000# Copyright (c) 2016 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
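# NOTE: a minimal usage sketch (assuming an authenticated request context
# is already set up): calling client().images.list() from this module
# returns raw glanceclient v2 image records; the sahara-specific metadata
# handling for those records lives in
# sahara.utils.openstack.images.SaharaImageManager.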
from glanceclient import client as glance_client from oslo_config import cfg from sahara.service import sessions from sahara.utils.openstack import keystone opts = [ cfg.BoolOpt('api_insecure', default=False, help='Allow to perform insecure SSL requests to glance.'), cfg.StrOpt('ca_file', help='Location of ca certificates file to use for glance ' 'client requests.'), cfg.StrOpt("endpoint_type", default="internalURL", help="Endpoint type for glance client requests"), ] glance_group = cfg.OptGroup(name='glance', title='Glance client options') CONF = cfg.CONF CONF.register_group(glance_group) CONF.register_opts(opts, group=glance_group) def client(): session = sessions.cache().get_session(sessions.SESSION_TYPE_GLANCE) glance = glance_client.Client('2', session=session, auth=keystone.auth(), interface=CONF.glance.endpoint_type) return glance ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/openstack/heat.py0000664000175000017500000000775300000000000020755 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from heatclient import client as heat_client from oslo_config import cfg from sahara import context from sahara import exceptions as ex from sahara.i18n import _ from sahara.service import sessions from sahara.utils.openstack import base from sahara.utils.openstack import keystone opts = [ cfg.BoolOpt('api_insecure', default=False, help='Allow to perform insecure SSL requests to heat.'), cfg.StrOpt('ca_file', help='Location of ca certificates file to use for heat ' 'client requests.'), cfg.StrOpt("endpoint_type", default="internalURL", help="Endpoint type for heat client requests") ] heat_group = cfg.OptGroup(name='heat', title='Heat client options') CONF = cfg.CONF CONF.register_group(heat_group) CONF.register_opts(opts, group=heat_group) def client(): ctx = context.ctx() session = sessions.cache().get_heat_session() heat_url = base.url_for(ctx.service_catalog, 'orchestration', endpoint_type=CONF.heat.endpoint_type) return heat_client.Client( '1', endpoint=heat_url, session=session, auth=keystone.auth(), region_name=CONF.os_region_name) def get_stack(stack_name, raise_on_missing=True): for stack in base.execute_with_retries( client().stacks.list, show_hidden=True, filters={'name': stack_name}): return stack if not raise_on_missing: return None raise ex.NotFoundException({'stack': stack_name}, _('Failed to find stack %(stack)s')) def delete_stack(cluster): stack_name = cluster.stack_name base.execute_with_retries(client().stacks.delete, stack_name) stack = get_stack(stack_name, raise_on_missing=False) while stack is not None: # Valid states: IN_PROGRESS, empty and COMPLETE if stack.status in ['IN_PROGRESS', '', 'COMPLETE']: context.sleep(5) else: raise ex.HeatStackException( message=_( "Cannot delete heat stack {name}, reason: " "stack status: {status}, status reason: {reason}").format( name=stack_name, status=stack.status, reason=stack.stack_status_reason)) 
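        # Re-read the stack after each poll; the loop exits once Heat no
        # longer returns it (get_stack yields None), while any unexpected
        # status has already raised HeatStackException above.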
stack = get_stack(stack_name, raise_on_missing=False) def lazy_delete_stack(cluster): '''Attempt to delete stack once, but do not await successful deletion''' stack_name = cluster.stack_name base.execute_with_retries(client().stacks.delete, stack_name) def get_stack_outputs(cluster): stack = get_stack(cluster.stack_name) stack.get() return stack.outputs def _verify_completion(stack, is_update=False, last_update_time=None): # NOTE: expected empty status because status of stack # maybe is not set in heat database if stack.status in ['IN_PROGRESS', '']: return False if is_update and stack.status == 'COMPLETE': if stack.updated_time == last_update_time: return False return True def wait_stack_completion(cluster, is_update=False, last_updated_time=None): stack_name = cluster.stack_name stack = get_stack(stack_name) while not _verify_completion(stack, is_update, last_updated_time): context.sleep(1) stack = get_stack(stack_name) if stack.status != 'COMPLETE': raise ex.HeatStackException(stack.stack_status_reason) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/openstack/images.py0000664000175000017500000001363300000000000021273 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
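# A minimal usage sketch, not from the packaged sources, combining two helpers
# from the heat module above. `cluster` stands for any object with a
# `stack_name` attribute, as the module itself expects.
from sahara.utils.openstack import heat


def collect_outputs_when_ready(cluster):
    # Block until the stack reaches COMPLETE (HeatStackException is raised on
    # a failed status), then return the refreshed stack outputs.
    heat.wait_stack_completion(cluster)
    return heat.get_stack_outputs(cluster)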
import functools import six from sahara.conductor import resource from sahara import exceptions as exc from sahara.utils.openstack import glance PROP_DESCR = '_sahara_description' PROP_USERNAME = '_sahara_username' PROP_TAG = '_sahara_tag_' PROP_ALL_TAGS = '_all_tags' def image_manager(): return SaharaImageManager() def wrap_entity(func): @functools.wraps(func) def handle(*args, **kwargs): res = func(*args, **kwargs) if isinstance(res, list): images = [] for image in res: image = _transform_image_props(image) images.append(resource.ImageResource(image)) return images else: res = _transform_image_props(res) return resource.ImageResource(res) return handle def _get_all_tags(image_props): tags = [] for key, value in image_props.iteritems(): if key.startswith(PROP_TAG) and value: tags.append(key) return tags def _get_meta_prop(image_props, prop, default=None): if PROP_ALL_TAGS == prop: return _get_all_tags(image_props) return image_props.get(prop, default) def _parse_tags(image_props): tags = _get_meta_prop(image_props, PROP_ALL_TAGS) return [t.replace(PROP_TAG, "") for t in tags] def _serialize_metadata(image): data = {} for key, value in image.iteritems(): if key.startswith('_sahara') and value: data[key] = value return data def _get_compat_values(image): data = {} # TODO(vgridnev): Drop these values from APIv2 data["OS-EXT-IMG-SIZE:size"] = image.size data['metadata'] = _serialize_metadata(image) data["minDisk"] = getattr(image, 'min_disk', 0) data["minRam"] = getattr(image, 'min_ram', 0) data["progress"] = getattr(image, 'progress', 100) data["status"] = image.status.upper() data['created'] = image.created_at data['updated'] = image.updated_at return data def _transform_image_props(image): data = _get_compat_values(image) data['username'] = _get_meta_prop(image, PROP_USERNAME, "") data['description'] = _get_meta_prop(image, PROP_DESCR, "") data['tags'] = _parse_tags(image) data['id'] = image.id data["name"] = image.name return data def _ensure_tags(tags): if not tags: return [] return [tags] if isinstance(tags, six.string_types) else tags class SaharaImageManager(object): """SaharaImageManager This class is intermediate layer between sahara and glanceclient.v2.images. It provides additional sahara properties for image such as description, image tags and image username. """ def __init__(self): self.client = glance.client().images @wrap_entity def get(self, image_id): image = self.client.get(image_id) return image @wrap_entity def find(self, **kwargs): images = self.client.list(**kwargs) num_matches = len(images) if num_matches == 0: raise exc.NotFoundException(kwargs, "No images matching %s.") elif num_matches > 1: raise exc.NoUniqueMatchException(response=images, query=kwargs) else: return images[0] @wrap_entity def list(self): return list(self.client.list()) def set_meta(self, image_id, meta): self.client.update(image_id, remove_props=None, **meta) def delete_meta(self, image_id, meta_list): self.client.update(image_id, remove_props=meta_list) def set_image_info(self, image_id, username, description=None): """Sets human-readable information for image. For example: Ubuntu 15 x64 with Java 1.7 and Apache Hadoop 2.1, ubuntu """ meta = {PROP_USERNAME: username} if description: meta[PROP_DESCR] = description self.set_meta(image_id, meta) def unset_image_info(self, image_id): """Unsets all Sahara-related information. It removes username, description and tags from the specified image. 
""" image = self.get(image_id) meta = [PROP_TAG + tag for tag in image.tags] if image.description is not None: meta += [PROP_DESCR] if image.username is not None: meta += [PROP_USERNAME] self.delete_meta(image_id, meta) def tag(self, image_id, tags): """Adds tags to the specified image.""" tags = _ensure_tags(tags) self.set_meta(image_id, {PROP_TAG + tag: 'True' for tag in tags}) def untag(self, image_id, tags): """Removes tags from the specified image.""" tags = _ensure_tags(tags) self.delete_meta(image_id, [PROP_TAG + tag for tag in tags]) def list_by_tags(self, tags): """Returns images having all of the specified tags.""" tags = _ensure_tags(tags) return [i for i in self.list() if set(tags).issubset(i.tags)] def list_registered(self, name=None, tags=None): tags = _ensure_tags(tags) images_list = [i for i in self.list() if i.username and set(tags).issubset(i.tags)] if name: return [i for i in images_list if name in i.name] else: return images_list def get_registered_image(self, image_id): img = self.get(image_id) if img.username: return img else: raise exc.ImageNotRegistered(image_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/openstack/keystone.py0000664000175000017500000002353100000000000021665 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import re from keystoneauth1 import identity as keystone_identity from keystoneclient.v2_0 import client as keystone_client from keystoneclient.v3 import client as keystone_client_v3 from oslo_config import cfg from oslo_log import log as logging from sahara import context from sahara.service import sessions from sahara.utils.openstack import base LOG = logging.getLogger(__name__) def _get_keystoneauth_cfg(name): """get the keystone auth cfg Fetch value of keystone_authtoken group from config file when not available as part of GroupAttr. :rtype: String :param name: property name to be retrieved """ try: value_list = CONF._namespace._get_file_value([('keystone_authtoken', name)]) if isinstance(value_list, tuple): value_list = value_list[0] cfg_val = value_list[0] if name == "auth_url" and not re.findall(r'\/v[2-3].*', cfg_val): cfg_val += "/v3" return cfg_val except KeyError: if name in ["user_domain_name", "project_domain_name"]: return "Default" else: raise def validate_config(): if any(map(lambda o: getattr(CONF.trustee, o) is None, CONF.trustee)): for replace_opt in CONF.trustee: CONF.set_override(replace_opt, _get_keystoneauth_cfg(replace_opt), group="trustee") LOG.warning(""" __ __ _ \ \ / /_ _ _ __ _ __ (_)_ __ __ _ \ \ /\ / / _` | '__| '_ \| | '_ \ / _` | \ V V / (_| | | | | | | | | | | (_| | \_/\_/ \__,_|_| |_| |_|_|_| |_|\__, | |___/ Using the [keystone_authtoken] user as the Sahara trustee user directly is deprecated. Please add the trustee credentials you need to the [trustee] section of your sahara.conf file. 
""") opts = [ # TODO(alazarev) Move to [keystone] section cfg.BoolOpt('use_identity_api_v3', default=True, help='Enables Sahara to use Keystone API v3. ' 'If that flag is disabled, ' 'per-job clusters will not be terminated ' 'automatically.') ] ssl_opts = [ cfg.BoolOpt('api_insecure', default=False, help='Allow to perform insecure SSL requests to keystone.'), cfg.StrOpt('ca_file', help='Location of ca certificates file to use for keystone ' 'client requests.'), cfg.StrOpt("endpoint_type", default="internalURL", help="Endpoint type for keystone client requests") ] keystone_group = cfg.OptGroup(name='keystone', title='Keystone client options') trustee_opts = [ cfg.StrOpt('username', help='Username for trusts creation'), cfg.StrOpt('password', help='Password for trusts creation'), cfg.StrOpt('project_name', help='Project name for trusts creation'), cfg.StrOpt('user_domain_name', help='User domain name for trusts creation', default="Default"), cfg.StrOpt('project_domain_name', help='Project domain name for trusts creation', default="Default"), cfg.StrOpt('auth_url', help='Auth url for trusts creation'), ] trustee_group = cfg.OptGroup(name='trustee', title="Trustee options") CONF = cfg.CONF CONF.register_group(keystone_group) CONF.register_group(trustee_group) CONF.register_opts(opts) CONF.register_opts(ssl_opts, group=keystone_group) CONF.register_opts(trustee_opts, group=trustee_group) def auth(): '''Return a token auth plugin for the current context.''' ctx = context.current() return ctx.auth_plugin or token_auth(token=context.get_auth_token(), project_id=ctx.tenant_id) def auth_for_admin(project_name=None, trust_id=None): '''Return an auth plugin for the admin. :param project_name: a project to scope the auth with (optional). :param trust_id: a trust to scope the auth with (optional). :returns: an auth plugin object for the admin. ''' # TODO(elmiko) revisit the project_domain_name if we start getting # into federated authentication. it will need to match the domain that # the project_name exists in. auth = _password_auth( username=CONF.trustee.username, password=CONF.trustee.password, project_name=project_name, user_domain_name=CONF.trustee.user_domain_name, project_domain_name=CONF.trustee.project_domain_name, trust_id=trust_id) return auth def auth_for_proxy(username, password, trust_id=None): '''Return an auth plugin for the proxy user. :param username: the name of the proxy user. :param password: the proxy user's password. :param trust_id: a trust to scope the auth with (optional). :returns: an auth plugin object for the proxy user. ''' auth = _password_auth( username=username, password=password, user_domain_name=CONF.proxy_user_domain_name, trust_id=trust_id) return auth def client(): '''Return the current context client.''' return client_from_auth(auth()) def client_for_admin(): '''Return the Sahara admin user client.''' auth = auth_for_admin( project_name=CONF.trustee.project_name) return client_from_auth(auth) def client_from_auth(auth): '''Return a session based client from the auth plugin provided. A session is obtained from the global session cache. :param auth: the auth plugin object to use in client creation. :returns: a keystone client ''' session = sessions.cache().get_session(sessions.SESSION_TYPE_KEYSTONE) if CONF.use_identity_api_v3: client_class = keystone_client_v3.Client else: client_class = keystone_client.Client return client_class(session=session, auth=auth) def project_id_from_auth(auth): '''Return the project id associated with an auth plugin. 
:param auth: the auth plugin to inspect. :returns: the project id associated with the auth plugin. ''' return auth.get_project_id( sessions.cache().get_session(sessions.SESSION_TYPE_KEYSTONE)) def service_catalog_from_auth(auth): '''Return the service catalog associated with an auth plugin. :param auth: the auth plugin to inspect. :returns: a list containing the service catalog. ''' access_info = auth.get_access( sessions.cache().get_session(sessions.SESSION_TYPE_KEYSTONE)) if access_info.has_service_catalog(): return access_info.service_catalog.catalog else: return [] def token_auth(token, project_id=None, project_name=None, project_domain_name='Default'): '''Return a token auth plugin object. :param token: the token to use for authentication. :param project_id: the project(ex. tenant) id to scope the auth. :returns: a token auth plugin object. ''' token_kwargs = dict( auth_url=base.retrieve_auth_url(CONF.keystone.endpoint_type), token=token ) if CONF.use_identity_api_v3: token_kwargs.update(dict( project_id=project_id, project_name=project_name, project_domain_name=project_domain_name, )) auth = keystone_identity.v3.Token(**token_kwargs) else: token_kwargs.update(dict( tenant_id=project_id, tenant_name=project_name, )) auth = keystone_identity.v2.Token(**token_kwargs) return auth def token_from_auth(auth): '''Return an authentication token from an auth plugin. :param auth: the auth plugin to acquire a token from. :returns: an auth token in string format. ''' return sessions.cache().token_for_auth(auth) def user_id_from_auth(auth): '''Return a user id associated with an auth plugin. :param auth: the auth plugin to inspect. :returns: a token associated with the auth. ''' return auth.get_user_id(sessions.cache().get_session( sessions.SESSION_TYPE_KEYSTONE)) def _password_auth(username, password, project_name=None, user_domain_name=None, project_domain_name=None, trust_id=None): '''Return a password auth plugin object. :param username: the user to authenticate as. :param password: the user's password. :param project_name: the project(ex. tenant) name to scope the auth. :param user_domain_name: the domain the user belongs to. :param project_domain_name: the domain the project belongs to. :param trust_id: a trust id to scope the auth. :returns: a password auth plugin object. ''' passwd_kwargs = dict( auth_url=CONF.trustee.auth_url, username=username, password=password ) if CONF.use_identity_api_v3: passwd_kwargs.update(dict( project_name=project_name, user_domain_name=user_domain_name, project_domain_name=project_domain_name, trust_id=trust_id )) auth = keystone_identity.v3.Password(**passwd_kwargs) else: passwd_kwargs.update(dict( tenant_name=project_name, trust_id=trust_id )) auth = keystone_identity.v2.Password(**passwd_kwargs) return auth ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/openstack/manila.py0000664000175000017500000000454200000000000021266 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
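# A minimal usage sketch, not from the packaged sources, for the keystone
# helper above: building an admin-scoped auth plugin and turning it into a
# client and a token. It relies on the [trustee] credentials from sahara.conf;
# 'services' is a placeholder project name.
from sahara.utils.openstack import keystone


def admin_client_and_token():
    auth = keystone.auth_for_admin(project_name='services')
    ks_client = keystone.client_from_auth(auth)   # v3 client by default
    token = keystone.token_from_auth(auth)        # uses the cached session
    return ks_client, token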
# See the License for the specific language governing permissions and # limitations under the License. import manilaclient.client as manila_client try: from manilaclient.common.apiclient import exceptions as manila_ex except ImportError: from manilaclient.openstack.common.apiclient import exceptions as manila_ex from oslo_config import cfg from sahara import context from sahara import exceptions as ex from sahara.i18n import _ from sahara.utils.openstack import base opts = [ cfg.StrOpt('api_version', default='1', help='Version of the manila API to use.'), cfg.BoolOpt('api_insecure', default=True, help='Allow to perform insecure SSL requests to manila.'), cfg.StrOpt('ca_file', help='Location of ca certificates file to use for manila ' 'client requests.') ] manila_group = cfg.OptGroup(name='manila', title='Manila client options') CONF = cfg.CONF CONF.register_group(manila_group) CONF.register_opts(opts, group=manila_group) MANILA_PREFIX = "manila://" def client(): ctx = context.ctx() args = { 'username': ctx.username, 'project_name': ctx.tenant_name, 'project_id': ctx.tenant_id, 'input_auth_token': context.get_auth_token(), 'auth_url': base.retrieve_auth_url(), 'service_catalog_url': base.url_for(ctx.service_catalog, 'share'), 'ca_cert': CONF.manila.ca_file, 'insecure': CONF.manila.api_insecure } return manila_client.Client(CONF.manila.api_version, **args) def get_share(client_instance, share_id, raise_on_error=False): try: return client_instance.shares.get(share_id) except manila_ex.NotFound: if raise_on_error: raise ex.NotFoundException( share_id, _("Share with id %s was not found.")) else: return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/openstack/neutron.py0000664000175000017500000000744100000000000021520 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Hortonworks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
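# A minimal usage sketch, not from the packaged sources, for the manila helper
# above. An active request context whose service catalog contains a 'share'
# endpoint is assumed; `share_id` is a placeholder.
from sahara.utils.openstack import manila


def share_exists(share_id):
    client = manila.client()
    # get_share() returns None for a missing share unless raise_on_error=True.
    return manila.get_share(client, share_id) is not None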
from neutronclient.common import exceptions as n_ex from neutronclient.neutron import client as neutron_cli from oslo_config import cfg from oslo_log import log as logging from sahara import exceptions as ex from sahara.i18n import _ from sahara.service import sessions from sahara.utils.openstack import base from sahara.utils.openstack import keystone opts = [ cfg.BoolOpt('api_insecure', default=False, help='Allow to perform insecure SSL requests to neutron.'), cfg.StrOpt('ca_file', help='Location of ca certificates file to use for neutron ' 'client requests.'), cfg.StrOpt("endpoint_type", default="internalURL", help="Endpoint type for neutron client requests") ] neutron_group = cfg.OptGroup(name='neutron', title='Neutron client options') CONF = cfg.CONF CONF.register_group(neutron_group) CONF.register_opts(opts, group=neutron_group) LOG = logging.getLogger(__name__) def client(auth=None): if not auth: auth = keystone.auth() session = sessions.cache().get_session(sessions.SESSION_TYPE_NEUTRON) neutron = neutron_cli.Client('2.0', session=session, auth=auth, endpoint_type=CONF.neutron.endpoint_type, region_name=CONF.os_region_name) return neutron class NeutronClient(object): neutron = None routers = {} def __init__(self, network, token, tenant_name, auth=None): if not auth: auth = keystone.token_auth(token=token, project_name=tenant_name) self.neutron = client(auth) self.network = network def get_router(self): matching_router = NeutronClient.routers.get(self.network, None) if matching_router: LOG.debug('Returning cached qrouter') return matching_router['id'] routers = self.neutron.list_routers()['routers'] for router in routers: device_id = router['id'] ports = base.execute_with_retries( self.neutron.list_ports, device_id=device_id)['ports'] port = next((port for port in ports if port['network_id'] == self.network), None) if port: matching_router = router NeutronClient.routers[self.network] = matching_router break if not matching_router: raise ex.SystemError(_('Neutron router corresponding to network ' '%s is not found') % self.network) return matching_router['id'] def get_private_network_cidrs(cluster): neutron_client = client() private_net = base.execute_with_retries(neutron_client.show_network, cluster.neutron_management_network) cidrs = [] for subnet_id in private_net['network']['subnets']: subnet = base.execute_with_retries( neutron_client.show_subnet, subnet_id) cidrs.append(subnet['subnet']['cidr']) return cidrs def get_network(id): try: return base.execute_with_retries( client().find_resource_by_id, 'network', id) except n_ex.NotFound: return None ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/openstack/nova.py0000664000175000017500000000376300000000000020774 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
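# A minimal usage sketch, not from the packaged sources, for the two lookup
# paths offered by the neutron helper above. `cluster` stands for an object
# with a `neutron_management_network` attribute; the token and tenant values
# are placeholders.
from sahara.utils.openstack import neutron


def network_info(cluster, auth_token, tenant_name):
    cidrs = neutron.get_private_network_cidrs(cluster)
    nc = neutron.NeutronClient(cluster.neutron_management_network,
                               auth_token, tenant_name)
    router_id = nc.get_router()   # cached per network in NeutronClient.routers
    return cidrs, router_id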
from novaclient import client as nova_client from oslo_config import cfg from sahara.service import sessions import sahara.utils.openstack.base as base from sahara.utils.openstack import keystone opts = [ cfg.BoolOpt('api_insecure', default=False, help='Allow to perform insecure SSL requests to nova.'), cfg.StrOpt('ca_file', help='Location of ca certificates file to use for nova ' 'client requests.'), cfg.StrOpt("endpoint_type", default="internalURL", help="Endpoint type for nova client requests") ] nova_group = cfg.OptGroup(name='nova', title='Nova client options') CONF = cfg.CONF CONF.register_group(nova_group) CONF.register_opts(opts, group=nova_group) def client(): session = sessions.cache().get_session(sessions.SESSION_TYPE_NOVA) nova = nova_client.Client('2', session=session, auth=keystone.auth(), endpoint_type=CONF.nova.endpoint_type, region_name=CONF.os_region_name) return nova def get_flavor(**kwargs): return base.execute_with_retries(client().flavors.find, **kwargs) def get_instance_info(instance): return base.execute_with_retries( client().servers.get, instance.instance_id) def get_keypair(keypair_name): return base.execute_with_retries( client().keypairs.get, keypair_name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/openstack/swift.py0000664000175000017500000000705400000000000021162 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg import swiftclient from sahara import context from sahara.swift import swift_helper as sh from sahara.swift import utils as su from sahara.utils.openstack import base from sahara.utils.openstack import keystone as k opts = [ cfg.BoolOpt('api_insecure', default=False, help='Allow to perform insecure SSL requests to swift.'), cfg.StrOpt('ca_file', help='Location of ca certificates file to use for swift ' 'client requests.'), cfg.StrOpt("endpoint_type", default="internalURL", help="Endpoint type for swift client requests") ] swift_group = cfg.OptGroup(name='swift', title='Swift client options') CONF = cfg.CONF CONF.register_group(swift_group) CONF.register_opts(opts, group=swift_group) def client(username, password, trust_id=None): '''return a Swift client This will return a Swift client for the specified username scoped to the current context project, unless a trust identifier is specified. If a trust identifier is present then the Swift client will be created based on a preauthorized token generated by the username scoped to the trust identifier. 
:param username: The username for the Swift client :param password: The password associated with the username :param trust_id: A trust identifier for scoping the username (optional) :returns: A Swift client object ''' if trust_id: proxyauth = k.auth_for_proxy(username, password, trust_id) return client_from_token(k.token_from_auth(proxyauth)) else: return swiftclient.Connection( auth_version='3', cacert=CONF.swift.ca_file, insecure=CONF.swift.api_insecure, authurl=su.retrieve_auth_url(CONF.keystone.endpoint_type), user=username, key=password, tenant_name=sh.retrieve_tenant(), retries=CONF.retries.retries_number, retry_on_ratelimit=True, starting_backoff=CONF.retries.retry_after, max_backoff=CONF.retries.retry_after) def client_from_token(token=None): if not token: token = context.get_auth_token() '''return a Swift client authenticated from a token.''' return swiftclient.Connection(auth_version='3', cacert=CONF.swift.ca_file, insecure=CONF.swift.api_insecure, preauthurl=base.url_for( service_type="object-store", endpoint_type=CONF.swift.endpoint_type), preauthtoken=token, retries=CONF.retries.retries_number, retry_on_ratelimit=True, starting_backoff=CONF.retries.retry_after, max_backoff=CONF.retries.retry_after) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/patches.py0000664000175000017500000000346200000000000017465 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import eventlet EVENTLET_MONKEY_PATCH_MODULES = dict(os=True, select=True, socket=True, thread=True, time=True) def patch_all(): """Apply all patches. List of patches: * eventlet's monkey patch for all cases; """ eventlet_monkey_patch() def eventlet_monkey_patch(): """Apply eventlet's monkey patch. This call should be the first call in application. It's safe to call monkey_patch multiple times. """ eventlet.monkey_patch(**EVENTLET_MONKEY_PATCH_MODULES) # Monkey patch the original current_thread to use the up-to-date _active # global variable. See https://bugs.launchpad.net/bugs/1863021 and # https://github.com/eventlet/eventlet/issues/592 import __original_module_threading as orig_threading import threading # noqa orig_threading.current_thread.__globals__['_active'] = threading._active def eventlet_import_monkey_patched(module): """Returns module monkey patched by eventlet. It's needed for some tests, for example, context test. """ return eventlet.import_patched(module, **EVENTLET_MONKEY_PATCH_MODULES) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/poll_utils.py0000664000175000017500000001253000000000000020220 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
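# A minimal usage sketch, not from the packaged sources, for the swift helper
# above: creating a proxy-user scoped connection and writing a marker object.
# The credentials and container name are placeholders; put_object() comes from
# swiftclient.Connection's standard API.
from sahara.utils.openstack import swift


def write_marker(username, password, trust_id, container):
    conn = swift.client(username, password, trust_id=trust_id)
    conn.put_object(container, '.sahara-marker', b'')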
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import functools from oslo_config import cfg from oslo_log import log as logging from oslo_utils import timeutils from sahara import context from sahara import exceptions as ex from sahara.utils import cluster as cluster_utils LOG = logging.getLogger(__name__) # set 3 hours timeout by default DEFAULT_TIMEOUT = 10800 DEFAULT_SLEEP_TIME = 5 timeouts_opts = [ # engine opts cfg.IntOpt('ips_assign_timeout', default=DEFAULT_TIMEOUT, help="Assign IPs timeout, in seconds"), cfg.IntOpt('wait_until_accessible', default=DEFAULT_TIMEOUT, help="Wait for instance accessibility, in seconds"), # direct engine opts cfg.IntOpt('delete_instances_timeout', default=DEFAULT_TIMEOUT, help="Wait for instances to be deleted, in seconds"), # volumes opts cfg.IntOpt( 'detach_volume_timeout', default=300, help='Timeout for detaching volumes from instance, in seconds'), ] timeouts = cfg.OptGroup(name='timeouts', title='Sahara timeouts') CONF = cfg.CONF CONF.register_group(timeouts) CONF.register_opts(timeouts_opts, group=timeouts) def _get_consumed(started_at): return timeutils.delta_seconds(started_at, timeutils.utcnow()) def _get_current_value(cluster, option): option_target = option.applicable_target conf = cluster.cluster_configs if option_target in conf and option.name in conf[option_target]: return conf[option_target][option.name] return option.default_value def poll(get_status, kwargs=None, args=None, operation_name=None, timeout_name=None, timeout=DEFAULT_TIMEOUT, sleep=DEFAULT_SLEEP_TIME, exception_strategy='raise'): """This util poll status of object obj during some timeout. :param get_status: function, which return current status of polling as Boolean :param kwargs: keyword arguments of function get_status :param operation_name: name of polling process :param timeout_name: name of timeout option :param timeout: value of timeout in seconds. By default, it equals to 3 hours :param sleep: duration between two consecutive executions of get_status function :param exception_strategy: possible values ('raise', 'mark_as_true', 'mark_as_false'). If exception_strategy is 'raise' exception would be raised. If exception_strategy is 'mark_as_true', return value of get_status would marked as True, and in case of 'mark_as_false' - False. By default it's 'raise'. """ start_time = timeutils.utcnow() # We shouldn't raise TimeoutException if incorrect timeout specified and # status is ok now. In such way we should execute get_status at least once. 
at_least_once = True if not kwargs: kwargs = {} if not args: args = () while at_least_once or _get_consumed(start_time) < timeout: at_least_once = False try: status = get_status(*args, **kwargs) except BaseException: if exception_strategy == 'raise': raise elif exception_strategy == 'mark_as_true': status = True else: status = False if status: operation = "Operation" if operation_name: operation = "Operation with name {op_name}".format( op_name=operation_name) LOG.debug( '{operation_desc} was executed successfully in timeout ' '{timeout}' .format(operation_desc=operation, timeout=timeout)) return context.sleep(sleep) raise ex.TimeoutException(timeout, operation_name, timeout_name) def plugin_option_poll(cluster, get_status, option, operation_name, sleep_time, kwargs): def _get(n_cluster, n_kwargs): if not cluster_utils.check_cluster_exists(n_cluster): return True return get_status(**n_kwargs) poll_description = { 'get_status': _get, 'kwargs': {'n_cluster': cluster, 'n_kwargs': kwargs}, 'timeout': _get_current_value(cluster, option), 'operation_name': operation_name, 'sleep': sleep_time, 'timeout_name': option.name } poll(**poll_description) def poll_status(option, operation_name, sleep): def decorator(f): @functools.wraps(f) def handler(*args, **kwargs): poll_description = { 'get_status': f, 'kwargs': kwargs, 'args': args, 'timeout': getattr(CONF.timeouts, option), 'operation_name': operation_name, 'timeout_name': option, 'sleep': sleep, } poll(**poll_description) return handler return decorator ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/procutils.py0000664000175000017500000000570600000000000020065 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import pickle # nosec import sys from eventlet.green import subprocess from eventlet import timeout as e_timeout from sahara import context from sahara import exceptions def _get_sub_executable(): return '%s/_sahara-subprocess' % os.path.dirname(sys.argv[0]) def start_subprocess(): return subprocess.Popen((sys.executable, _get_sub_executable()), close_fds=True, bufsize=0, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) def run_in_subprocess(proc, func, args=None, kwargs=None, interactive=False): args = args or () kwargs = kwargs or {} try: # TODO(elmiko) these pickle usages should be reinvestigated to # determine a more secure manner to deploy remote commands. pickle.dump(func, proc.stdin, protocol=2) # nosec pickle.dump(args, proc.stdin, protocol=2) # nosec pickle.dump(kwargs, proc.stdin, protocol=2) # nosec proc.stdin.flush() if not interactive: result = pickle.load(proc.stdout) # nosec if 'exception' in result: raise exceptions.SubprocessException(result['exception']) return result['output'] finally: # NOTE(dmitryme): in oslo.concurrency's file processutils.py it # is suggested to sleep a little between calls to multiprocessing. 
# That should allow it make some necessary cleanup context.sleep(0) def _finish(cleanup_func): cleanup_func() sys.stdin.close() sys.stdout.close() sys.stderr.close() sys.exit(0) def shutdown_subprocess(proc, cleanup_func): try: with e_timeout.Timeout(5): # timeout would mean that our single-threaded subprocess # is hung on previous task which blocks _finish to complete run_in_subprocess(proc, _finish, (cleanup_func,)) except BaseException: # exception could be caused by either timeout, or # successful shutdown, ignoring anyway pass finally: kill_subprocess(proc) def kill_subprocess(proc): proc.stdin.close() proc.stdout.close() proc.stderr.close() try: proc.kill() proc.wait() except OSError: # could be caused by process already dead, so ignoring pass ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/proxy.py0000664000175000017500000002634300000000000017222 0ustar00zuulzuul00000000000000# Copyright (c) 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils import six from sahara import conductor as c from sahara import context from sahara import exceptions as ex from sahara.i18n import _ from sahara.service.castellan import utils as key_manager from sahara.service.edp import job_utils from sahara.service import trusts as t from sahara.swift import utils as su from sahara.utils.openstack import base as b from sahara.utils.openstack import keystone as k PROXY_DOMAIN = None conductor = c.API LOG = logging.getLogger(__name__) CONF = cfg.CONF opts = [ cfg.BoolOpt('use_domain_for_proxy_users', default=False, help='Enables Sahara to use a domain for creating temporary ' 'proxy users to access Swift. 
If this is enabled ' 'a domain must be created for Sahara to use.'), cfg.StrOpt('proxy_user_domain_name', default=None, help='The domain Sahara will use to create new proxy users ' 'for Swift object access.'), cfg.ListOpt('proxy_user_role_names', default=['member'], help='A list of the role names that the proxy user should ' 'assume through trust for Swift object access.') ] CONF.register_opts(opts) def create_proxy_user_for_job_execution(job_execution): '''Creates a proxy user and adds the credentials to the job execution :param job_execution: The job execution model to update ''' username = 'job_{0}'.format(job_execution.id) password = key_manager.store_secret(proxy_user_create(username)) current_user = k.auth() proxy_user = k.auth_for_proxy(username, password) trust_id = t.create_trust(trustor=current_user, trustee=proxy_user, role_names=CONF.proxy_user_role_names) update = {'job_configs': job_execution.job_configs.to_dict()} update['job_configs']['proxy_configs'] = { 'proxy_username': username, 'proxy_password': password, 'proxy_trust_id': trust_id } conductor.job_execution_update(context.ctx(), job_execution, update) def delete_proxy_user_for_job_execution(job_execution): '''Delete a proxy user based on a JobExecution :param job_execution: The job execution with proxy user information :returns: An updated job_configs dictionary or None ''' proxy_configs = job_execution.job_configs.get('proxy_configs') if proxy_configs is not None: proxy_username = proxy_configs.get('proxy_username') proxy_trust_id = proxy_configs.get('proxy_trust_id') proxy_user = k.auth_for_proxy(proxy_username, key_manager.get_secret( proxy_configs.get('proxy_password')), proxy_trust_id) t.delete_trust(proxy_user, proxy_trust_id) proxy_user_delete(proxy_username) key_manager.delete_secret(proxy_configs.get('proxy_password')) update = job_execution.job_configs.to_dict() del update['proxy_configs'] return update return None def create_proxy_user_for_cluster(cluster): '''Creates a proxy user and adds the credentials to the cluster :param cluster: The cluster model to update ''' if cluster.cluster_configs.get('proxy_configs'): return cluster username = 'cluster_{0}'.format(cluster.id) password = key_manager.store_secret(proxy_user_create(username)) current_user = k.auth() proxy_user = k.auth_for_proxy(username, password) trust_id = t.create_trust(trustor=current_user, trustee=proxy_user, role_names=CONF.proxy_user_role_names) update = {'cluster_configs': cluster.cluster_configs.to_dict()} update['cluster_configs']['proxy_configs'] = { 'proxy_username': username, 'proxy_password': password, 'proxy_trust_id': trust_id } return conductor.cluster_update(context.ctx(), cluster, update) def delete_proxy_user_for_cluster(cluster): '''Delete a proxy user based on a Cluster :param cluster: The cluster model with proxy user information ''' proxy_configs = cluster.cluster_configs.get('proxy_configs') if proxy_configs is not None: proxy_username = proxy_configs.get('proxy_username') proxy_trust_id = proxy_configs.get('proxy_trust_id') proxy_user = k.auth_for_proxy(proxy_username, key_manager.get_secret( proxy_configs.get('proxy_password')), proxy_trust_id) t.delete_trust(proxy_user, proxy_trust_id) proxy_user_delete(proxy_username) key_manager.delete_secret(proxy_configs.get('proxy_password')) update = {'cluster_configs': cluster.cluster_configs.to_dict()} del update['cluster_configs']['proxy_configs'] conductor.cluster_update(context.ctx(), cluster, update) def domain_for_proxy(): '''Return the proxy domain or None If configured 
to use the proxy domain, this function will return that domain. If not configured to use the proxy domain, this function will return None. If the proxy domain can't be found this will raise an exception. :returns: A Keystone Domain object or None. :raises ConfigurationError: If the domain is requested but not specified. :raises NotFoundException: If the domain name is specified but cannot be found. ''' if CONF.use_domain_for_proxy_users is False: return None if CONF.proxy_user_domain_name is None: raise ex.ConfigurationError(_('Proxy domain requested but not ' 'specified.')) admin = k.client_for_admin() global PROXY_DOMAIN if not PROXY_DOMAIN: domain_list = b.execute_with_retries( admin.domains.list, name=CONF.proxy_user_domain_name) if len(domain_list) == 0: raise ex.NotFoundException( value=CONF.proxy_user_domain_name, message_template=_('Failed to find domain %s')) # the domain name should be globally unique in Keystone if len(domain_list) > 1: raise ex.NotFoundException( value=CONF.proxy_user_domain_name, message_template=_('Unexpected results found when searching ' 'for domain %s')) PROXY_DOMAIN = domain_list[0] return PROXY_DOMAIN def job_execution_requires_proxy_user(job_execution): '''Returns True if the job execution requires a proxy user.''' def _check_values(values): return any(value.startswith( su.SWIFT_INTERNAL_PREFIX) for value in values if ( isinstance(value, six.string_types))) if CONF.use_domain_for_proxy_users is False: return False paths = [conductor.data_source_get(context.ctx(), job_execution.output_id), conductor.data_source_get(context.ctx(), job_execution.input_id)] if _check_values(ds.url for ds in paths if ds): return True if _check_values(six.itervalues( job_execution.job_configs.get('configs', {}))): return True if _check_values(six.itervalues( job_execution.job_configs.get('params', {}))): return True if _check_values(job_execution.job_configs.get('args', [])): return True job = conductor.job_get(context.ctx(), job_execution.job_id) if _check_values(main.url for main in job.mains): return True if _check_values(lib.url for lib in job.libs): return True # We did the simple checks, now if data_source referencing is # enabled and we have values that could be a name or uuid, # query for data_sources that match and contain a swift path by_name, by_uuid = job_utils.may_contain_data_source_refs( job_execution.job_configs) if by_name: names = tuple(job_utils.find_possible_data_source_refs_by_name( job_execution.job_configs)) # do a query here for name in names and path starts with swift-prefix if names and conductor.data_source_count( context.ctx(), name=names, url=su.SWIFT_INTERNAL_PREFIX+'%') > 0: return True if by_uuid: uuids = tuple(job_utils.find_possible_data_source_refs_by_uuid( job_execution.job_configs)) # do a query here for id in uuids and path starts with swift-prefix if uuids and conductor.data_source_count( context.ctx(), id=uuids, url=su.SWIFT_INTERNAL_PREFIX+'%') > 0: return True return False def proxy_domain_users_list(): '''Return a list of all users in the proxy domain.''' admin = k.client_for_admin() domain = domain_for_proxy() if domain: return b.execute_with_retries(admin.users.list, domain=domain.id) return [] def proxy_user_create(username): '''Create a new user in the proxy domain Creates the username specified with a random password. :param username: The name of the new user. :returns: The password created for the user. 
''' admin = k.client_for_admin() domain = domain_for_proxy() password = uuidutils.generate_uuid() b.execute_with_retries( admin.users.create, name=username, password=password, domain=domain.id) LOG.debug('Created proxy user {username}'.format(username=username)) return password def proxy_user_delete(username=None, user_id=None): '''Delete the user from the proxy domain. :param username: The name of the user to delete. :param user_id: The id of the user to delete, if provided this overrides the username. :raises NotFoundException: If there is an error locating the user in the proxy domain. ''' admin = k.client_for_admin() if not user_id: domain = domain_for_proxy() user_list = b.execute_with_retries( admin.users.list, domain=domain.id, name=username) if len(user_list) == 0: raise ex.NotFoundException( value=username, message_template=_('Failed to find user %s')) if len(user_list) > 1: raise ex.NotFoundException( value=username, message_template=_('Unexpected results found when searching ' 'for user %s')) user_id = user_list[0].id b.execute_with_retries(admin.users.delete, user_id) LOG.debug('Deleted proxy user id {user_id}'.format(user_id=user_id)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/remote.py0000664000175000017500000001260100000000000017324 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # Copyright (c) 2013 Hortonworks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc from oslo_config import cfg import six from sahara import exceptions as ex from sahara.i18n import _ # These options are for SSH remote only ssh_opts = [ cfg.IntOpt('global_remote_threshold', default=100, help='Maximum number of remote operations that will ' 'be running at the same time. Note that each ' 'remote operation requires its own process to ' 'run.'), cfg.IntOpt('cluster_remote_threshold', default=70, help='The same as global_remote_threshold, but for ' 'a single cluster.'), cfg.StrOpt('proxy_command', default='', help='Proxy command used to connect to instances. If set, this ' 'command should open a netcat socket, that Sahara will use for ' 'SSH and HTTP connections. Use {host} and {port} to describe ' 'the destination. Other available keywords: {tenant_id}, ' '{network_id}, {router_id}.'), cfg.BoolOpt('proxy_command_use_internal_ip', default=False, help='Force proxy_command usage to be consuming internal IP ' 'always, instead of management IP. 
Ignored if proxy_command ' 'is not set.') ] CONF = cfg.CONF CONF.register_opts(ssh_opts) DRIVER = None @six.add_metaclass(abc.ABCMeta) class RemoteDriver(object): @abc.abstractmethod def setup_remote(self, engine): """Performs driver initialization.""" @abc.abstractmethod def get_remote(self, instance): """Returns driver specific Remote.""" @abc.abstractmethod def get_userdata_template(self): """Returns userdata template preparing instance to work with driver.""" @abc.abstractmethod def get_type_and_version(self): """Returns engine type and version Result should be in the form 'type.major.minor'. """ @six.add_metaclass(abc.ABCMeta) class TerminalOnlyRemote(object): @abc.abstractmethod def execute_command(self, cmd, run_as_root=False, get_stderr=False, raise_when_error=True, timeout=300): """Execute specified command remotely using existing ssh connection. Return exit code, stdout data and stderr data of the executed command. """ @abc.abstractmethod def get_os_distrib(self): """Returns the OS distribution running on the target machine.""" @six.add_metaclass(abc.ABCMeta) class Remote(TerminalOnlyRemote): @abc.abstractmethod def get_neutron_info(self): """Returns dict which later could be passed to get_http_client.""" @abc.abstractmethod def get_http_client(self, port, info=None): """Returns HTTP client for a given instance's port.""" @abc.abstractmethod def close_http_session(self, port): """Closes cached HTTP session for a given instance's port.""" @abc.abstractmethod def write_file_to(self, remote_file, data, run_as_root=False, timeout=120): """Create remote file and write the given data to it. Uses existing ssh connection. """ @abc.abstractmethod def append_to_file(self, r_file, data, run_as_root=False, timeout=120): """Append the given data to remote file. Uses existing ssh connection. """ @abc.abstractmethod def write_files_to(self, files, run_as_root=False, timeout=120): """Copy file->data dictionary in a single ssh connection.""" @abc.abstractmethod def append_to_files(self, files, run_as_root=False, timeout=120): """Copy file->data dictionary in a single ssh connection.""" @abc.abstractmethod def read_file_from(self, remote_file, run_as_root=False, timeout=120): """Read remote file from the specified host and return given data.""" @abc.abstractmethod def replace_remote_string(self, remote_file, old_str, new_str, timeout=120): """Replaces strings in remote file using sed command.""" def setup_remote(driver, engine): global DRIVER DRIVER = driver DRIVER.setup_remote(engine) def get_remote_type_and_version(): return DRIVER.get_type_and_version() def _check_driver_is_loaded(): if not DRIVER: raise ex.SystemError(_('Remote driver is not loaded. Most probably ' 'you see this error because you are running ' 'Sahara in distributed mode and it is broken.' 'Try running sahara-all instead.')) def get_remote(instance): """Returns Remote for a given instance.""" _check_driver_is_loaded() return DRIVER.get_remote(instance) def get_userdata_template(): """Returns userdata template as a string.""" _check_driver_is_loaded() return DRIVER.get_userdata_template() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/resources.py0000664000175000017500000000412500000000000020045 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
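# A minimal usage sketch, not from the packaged sources, for the driver
# registry in the remote module above. It assumes setup_remote() has already
# been called with a concrete RemoteDriver (for example the SSH implementation
# from sahara/utils/ssh_remote.py); `instance` is a cluster instance object.
from sahara.utils import remote


def node_uptime(instance):
    # get_remote() raises SystemError if no driver has been loaded; with the
    # SSH driver the returned helper is a context manager and
    # execute_command() returns (exit_code, stdout) by default.
    with remote.get_remote(instance) as r:
        _code, stdout = r.execute_command('uptime')
        return stdout.strip()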
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import six class BaseResource(object): __resource_name__ = 'base' __filter_cols__ = [] @property def dict(self): return self.to_dict() @property def wrapped_dict(self): return {self.__resource_name__: self.dict} @property def __all_filter_cols__(self): cls = self.__class__ if not hasattr(cls, '__mro_filter_cols__'): filter_cols = [] for base_cls in inspect.getmro(cls): filter_cols += getattr(base_cls, '__filter_cols__', []) cls.__mro_filter_cols__ = set(filter_cols) return cls.__mro_filter_cols__ def _filter_field(self, k): return k == '_sa_instance_state' or k in self.__all_filter_cols__ def to_dict(self): dictionary = self.__dict__.copy() return {k: v for k, v in six.iteritems(dictionary) if not self._filter_field(k)} def as_resource(self): return Resource(self.__resource_name__, self.to_dict()) class Resource(BaseResource): def __init__(self, _name, _info): self._name = _name self.__resource_name__ = _name self._info = _info def __getattr__(self, k): if k not in self.__dict__: return self._info.get(k) return self.__dict__[k] def __repr__(self): return '<%s %s>' % (self._name, self._info) def __eq__(self, other): return self._name == other._name and self._info == other._info def to_dict(self): return self._info.copy() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/rpc.py0000664000175000017500000000715400000000000016624 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # Copyright (c) 2013 Julien Danjou # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
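# A minimal usage sketch, not from the packaged sources, for the Resource
# wrapper above: it turns a plain dict into an attribute-accessible object.
# The sample data is made up.
from sahara.utils import resources

image = resources.Resource('image', {'id': '42', 'name': 'fedora-base'})
assert image.name == 'fedora-base'                 # resolved via _info.get()
assert image.wrapped_dict == {'image': {'id': '42', 'name': 'fedora-base'}}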
from oslo_config import cfg from oslo_log import log as logging import oslo_messaging as messaging from oslo_messaging.rpc import dispatcher from oslo_serialization import jsonutils from sahara import context MESSAGING_TRANSPORT = None NOTIFICATION_TRANSPORT = None NOTIFIER = None CONF = cfg.CONF LOG = logging.getLogger(__name__) class ContextSerializer(messaging.Serializer): def __init__(self, base): self._base = base def serialize_entity(self, ctxt, entity): return self._base.serialize_entity(ctxt, entity) def deserialize_entity(self, ctxt, entity): return self._base.deserialize_entity(ctxt, entity) @staticmethod def serialize_context(ctxt): return ctxt.to_dict() @staticmethod def deserialize_context(ctxt): pass class JsonPayloadSerializer(messaging.NoOpSerializer): @classmethod def serialize_entity(cls, context, entity): return jsonutils.to_primitive(entity, convert_instances=True) class RPCClient(object): def __init__(self, target): global MESSAGING_TRANSPORT self.__client = messaging.RPCClient( target=target, transport=MESSAGING_TRANSPORT, ) def cast(self, name, **kwargs): ctx = context.current() self.__client.cast(ctx.to_dict(), name, **kwargs) def call(self, name, **kwargs): ctx = context.current() return self.__client.call(ctx.to_dict(), name, **kwargs) class RPCServer(object): def __init__(self, target): global MESSAGING_TRANSPORT access_policy = dispatcher.DefaultRPCAccessPolicy self.__server = messaging.get_rpc_server( target=target, transport=MESSAGING_TRANSPORT, endpoints=[self], executor='eventlet', access_policy=access_policy) def get_service(self): return self.__server def setup_service_messaging(): global MESSAGING_TRANSPORT if MESSAGING_TRANSPORT: # Already is up return MESSAGING_TRANSPORT = messaging.get_rpc_transport(cfg.CONF) def setup_notifications(): global NOTIFICATION_TRANSPORT, NOTIFIER, MESSAGING_TRANSPORT try: NOTIFICATION_TRANSPORT = messaging.get_notification_transport(cfg.CONF) except Exception: LOG.error("Unable to setup notification transport. Reusing " "service transport for that.") setup_service_messaging() NOTIFICATION_TRANSPORT = MESSAGING_TRANSPORT serializer = ContextSerializer(JsonPayloadSerializer()) NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT, serializer=serializer) def setup(service_name): """Initialise the oslo_messaging layer.""" messaging.set_transport_defaults('sahara') setup_notifications() if service_name != 'all-in-one': setup_service_messaging() def get_notifier(publisher_id): """Return a configured oslo_messaging notifier.""" return NOTIFIER.prepare(publisher_id=publisher_id) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/ssh_remote.py0000664000175000017500000010660400000000000020210 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # Copyright (c) 2013 Hortonworks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper methods for executing commands on nodes via SSH. 
The main access point is method get_remote(instance), it returns InstanceInteropHelper object which does the actual work. See the class for the list of available methods. It is a context manager, so it could be used with 'with' statement like that: with get_remote(instance) as r: r.execute_command(...) Note that the module offloads the ssh calls to a child process. It was implemented that way because we found no way to run paramiko and eventlet together. The private high-level module methods are implementations which are run in a separate process. """ import copy import os import shlex import sys import threading import time from eventlet.green import subprocess as e_subprocess from eventlet import semaphore from eventlet import timeout as e_timeout from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils from oslo_utils import uuidutils import paramiko import requests from requests import adapters import six from sahara import context from sahara import exceptions as ex from sahara.i18n import _ from sahara.service import trusts from sahara.utils import crypto from sahara.utils import network as net_utils from sahara.utils.openstack import neutron from sahara.utils import procutils from sahara.utils import remote LOG = logging.getLogger(__name__) CONF = cfg.CONF ssh_config_options = [ cfg.IntOpt( 'ssh_timeout_common', default=300, min=1, help="Overrides timeout for common ssh operations, in seconds"), cfg.IntOpt( 'ssh_timeout_interactive', default=1800, min=1, help="Overrides timeout for interactive ssh operations, in seconds"), cfg.IntOpt( 'ssh_timeout_files', default=600, min=1, help="Overrides timeout for ssh operations with files, in seconds"), ] CONF.register_opts(ssh_config_options) _ssh = None _proxy_ssh = None _sessions = {} INFRA = None SSH_TIMEOUTS_MAPPING = { '_execute_command': 'ssh_timeout_common', '_execute_command_interactive': 'ssh_timeout_interactive' } _global_remote_semaphore = None def _get_access_ip(instance): if CONF.proxy_command and CONF.proxy_command_use_internal_ip: return instance.internal_ip return instance.management_ip def _default_timeout(func): timeout = SSH_TIMEOUTS_MAPPING.get(func.__name__, 'ssh_timeout_files') return getattr(CONF, timeout, CONF.ssh_timeout_common) def _get_ssh_timeout(func, timeout): return timeout if timeout else _default_timeout(func) def _connect(host, username, private_key, proxy_command=None, gateway_host=None, gateway_image_username=None): global _ssh global _proxy_ssh LOG.debug('Creating SSH connection') if isinstance(private_key, six.string_types): private_key = crypto.to_paramiko_private_key(private_key) _ssh = paramiko.SSHClient() _ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) proxy = None if proxy_command: LOG.debug('Creating proxy using command: {command}'.format( command=proxy_command)) proxy = paramiko.ProxyCommand(proxy_command) if gateway_host: _proxy_ssh = paramiko.SSHClient() _proxy_ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) LOG.debug('Connecting to proxy gateway at: {gateway}'.format( gateway=gateway_host)) _proxy_ssh.connect(gateway_host, username=gateway_image_username, pkey=private_key, sock=proxy) proxy = _proxy_ssh.get_transport().open_session() proxy.exec_command("nc {0} 22".format(host)) _ssh.connect(host, username=username, pkey=private_key, sock=proxy) def _cleanup(): global _ssh global _proxy_ssh _ssh.close() if _proxy_ssh: _proxy_ssh.close() def _read_paramimko_stream(recv_func): result = b'' buf = recv_func(1024) while buf != b'': result += 
buf buf = recv_func(1024) return result def _escape_quotes(command): command = command.replace('\\', '\\\\') command = command.replace('"', '\\"') command = command.replace('`', '\\`') return command def _execute_command(cmd, run_as_root=False, get_stderr=False, raise_when_error=True): global _ssh chan = _ssh.get_transport().open_session() if run_as_root: chan.exec_command('sudo bash -c "%s"' % _escape_quotes(cmd)) else: chan.exec_command(cmd) # TODO(dmitryme): that could hang if stderr buffer overflows stdout = _read_paramimko_stream(chan.recv) stderr = _read_paramimko_stream(chan.recv_stderr) if type(stdout) == bytes: stdout = stdout.decode('utf-8') if type(stderr) == bytes: stderr = stderr.decode('utf-8') ret_code = chan.recv_exit_status() if ret_code and raise_when_error: raise ex.RemoteCommandException(cmd=cmd, ret_code=ret_code, stdout=stdout, stderr=stderr) if get_stderr: return ret_code, stdout, stderr else: return ret_code, stdout def _execute_command_interactive(cmd, run_as_root=False): global _ssh chan = _ssh.get_transport().open_session() if run_as_root: chan.exec_command('sudo bash -c "%s"' % _escape_quotes(cmd)) else: chan.exec_command(cmd) _proxy_shell(chan) _ssh.close() def _proxy_shell(chan): def readall(): while True: d = sys.stdin.read(1) if not d or chan.exit_status_ready(): break chan.send(d) reader = threading.Thread(target=readall) reader.start() while True: data = chan.recv(256) if not data or chan.exit_status_ready(): break sys.stdout.write(data) sys.stdout.flush() def _get_http_client(host, port, proxy_command=None, gateway_host=None, gateway_username=None, gateway_private_key=None): global _sessions _http_session = _sessions.get((host, port), None) LOG.debug('Cached HTTP session for {host}:{port} is {session}'.format( host=host, port=port, session=_http_session)) if not _http_session: if gateway_host: _http_session = _get_proxy_gateway_http_session( gateway_host, gateway_username, gateway_private_key, host, port, proxy_command) LOG.debug('Created ssh proxied HTTP session for {host}:{port}' .format(host=host, port=port)) elif proxy_command: # can return a new session here because it actually uses # the same adapter (and same connection pools) for a given # host and port tuple _http_session = _get_proxied_http_session( proxy_command, host, port=port) LOG.debug('Created proxied HTTP session for {host}:{port}' .format(host=host, port=port)) else: # need to cache the sessions that are not proxied through # HTTPRemoteWrapper so that a new session with a new HTTPAdapter # and associated pools is not recreated for each HTTP invocation _http_session = requests.Session() LOG.debug('Created standard HTTP session for {host}:{port}' .format(host=host, port=port)) adapter = requests.adapters.HTTPAdapter() for prefix in ['http://', 'https://']: _http_session.mount(prefix + '%s:%s' % (host, port), adapter) LOG.debug('Caching session {session} for {host}:{port}' .format(session=_http_session, host=host, port=port)) _sessions[(host, port)] = _http_session return _http_session def _write_fl(sftp, remote_file, data): try: write_data = paramiko.py3compat.StringIO(data) except TypeError: write_data = paramiko.py3compat.BytesIO(data) sftp.putfo(write_data, remote_file) def _append_fl(sftp, remote_file, data): fl = sftp.file(remote_file, 'a') fl.write(data) fl.close() def _write_file(sftp, remote_file, data, run_as_root): if run_as_root: temp_file = 'temp-file-%s' % uuidutils.generate_uuid() _write_fl(sftp, temp_file, data) _execute_command( 'mv %s %s' % (temp_file, remote_file), 
run_as_root=True) else: _write_fl(sftp, remote_file, data) def _append_file(sftp, remote_file, data, run_as_root): if run_as_root: temp_file = 'temp-file-%s' % uuidutils.generate_uuid() _write_fl(sftp, temp_file, data) _execute_command( 'cat %s >> %s' % (temp_file, remote_file), run_as_root=True) _execute_command('rm -f %s' % temp_file) else: _append_fl(sftp, remote_file, data) def _prepend_file(sftp, remote_file, data, run_as_root): if run_as_root: temp_file = 'temp-file-%s' % uuidutils.generate_uuid() temp_remote_file = 'temp-remote-file-%s' % uuidutils.generate_uuid() _write_fl(sftp, temp_file, data) _execute_command( 'cat %s > %s' % (remote_file, temp_remote_file)) _execute_command( 'cat %s %s > %s' % ( temp_file, temp_remote_file, remote_file), run_as_root=True) _execute_command('rm -f %s %s' % (temp_file, temp_remote_file)) def _write_file_to(remote_file, data, run_as_root=False): global _ssh _write_file(_ssh.open_sftp(), remote_file, data, run_as_root) def _write_files_to(files, run_as_root=False): global _ssh sftp = _ssh.open_sftp() for fl, data in six.iteritems(files): _write_file(sftp, fl, data, run_as_root) def _append_to_file(remote_file, data, run_as_root=False): global _ssh _append_file(_ssh.open_sftp(), remote_file, data, run_as_root) def _append_to_files(files, run_as_root=False): global _ssh sftp = _ssh.open_sftp() for fl, data in six.iteritems(files): _append_file(sftp, fl, data, run_as_root) def _prepend_to_file(remote_file, data, run_as_root=False): global _ssh _prepend_file(_ssh.open_sftp(), remote_file, data, run_as_root) def _prepend_to_files(files, run_as_root=False): global _ssh sftp = _ssh.open_sftp() for fl, data in six.iteritems(files): _prepend_file(sftp, fl, data, run_as_root) def _read_file(sftp, remote_file): fl = sftp.file(remote_file, 'r') data = fl.read() fl.close() try: return data.decode('utf-8') except Exception: return data def _read_file_from(remote_file, run_as_root=False): global _ssh fl = remote_file if run_as_root: fl = 'temp-file-%s' % (uuidutils.generate_uuid()) _execute_command('cp %s %s' % (remote_file, fl), run_as_root=True) try: return _read_file(_ssh.open_sftp(), fl) except IOError: LOG.error("Can't read file {filename}".format(filename=remote_file)) raise finally: if run_as_root: _execute_command( 'rm %s' % fl, run_as_root=True, raise_when_error=False) def _get_python_to_execute(): try: _execute_command('python3 --version') except Exception: _execute_command('python2 --version') return 'python2' return 'python3' def _get_os_distrib(): python_version = _get_python_to_execute() return _execute_command( ('printf "import platform\nprint(platform.linux_distribution(' 'full_distribution_name=0)[0])" | {}'.format(python_version)), run_as_root=False)[1].lower().strip() def _get_os_version(): python_version = _get_python_to_execute() return _execute_command( ('printf "import platform\nprint(platform.linux_distribution()[1])"' ' | {}'.format(python_version)), run_as_root=False)[1].strip() def _install_packages(packages): distrib = _get_os_distrib() if distrib == 'ubuntu': cmd = 'RUNLEVEL=1 apt-get install -y %(pkgs)s' elif distrib == 'fedora': fversion = _get_os_version() if fversion >= 22: cmd = 'dnf install -y %(pkgs)s' else: cmd = 'yum install -y %(pkgs)s' elif distrib in ('redhat', 'centos'): cmd = 'yum install -y %(pkgs)s' else: raise ex.NotImplementedException( _('Package Installation'), _('%(fmt)s is not implemented for OS %(distrib)s') % { 'fmt': '%s', 'distrib': distrib}) cmd = cmd % {'pkgs': ' '.join(packages)} _execute_command(cmd, 
run_as_root=True) def _update_repository(): distrib = _get_os_distrib() if distrib == 'ubuntu': cmd = 'apt-get update' elif distrib == 'fedora': fversion = _get_os_version() if fversion >= 22: cmd = 'dnf clean all' else: cmd = 'yum clean all' elif distrib in ('redhat', 'centos'): cmd = 'yum clean all' else: raise ex.NotImplementedException( _('Repository Update'), _('%(fmt)s is not implemented for OS %(distrib)s') % { 'fmt': '%s', 'distrib': distrib}) _execute_command(cmd, run_as_root=True) def _replace_remote_string(remote_file, old_str, new_str): old_str = old_str.replace("\'", "\''") new_str = new_str.replace("\'", "\''") cmd = "sudo sed -i 's,%s,%s,g' %s" % (old_str, new_str, remote_file) _execute_command(cmd) def _replace_remote_line(remote_file, old_line_with_start_string, new_line): search_string = old_line_with_start_string.replace("\'", "\''") cmd = ("sudo sed -i 's/^%s.*/%s/' %s" % (search_string, new_line, remote_file)) _execute_command(cmd) def _execute_on_vm_interactive(cmd, matcher): global _ssh buf = '' channel = _ssh.invoke_shell() LOG.debug('Channel is {channel}'.format(channel=channel)) try: LOG.debug('Sending cmd {command}'.format(command=cmd)) channel.send(cmd + '\n') while not matcher.is_eof(buf): buf += channel.recv(4096) response = matcher.get_response(buf) if response is not None: channel.send(response + '\n') buf = '' finally: LOG.debug('Closing channel') channel.close() def _acquire_remote_semaphore(): context.current().remote_semaphore.acquire() _global_remote_semaphore.acquire() def _release_remote_semaphore(): _global_remote_semaphore.release() context.current().remote_semaphore.release() def _get_proxied_http_session(proxy_command, host, port=None): session = requests.Session() adapter = ProxiedHTTPAdapter( _simple_exec_func(shlex.split(proxy_command)), host, port) session.mount('http://{0}:{1}'.format(host, adapter.port), adapter) return session def _get_proxy_gateway_http_session(gateway_host, gateway_username, gateway_private_key, host, port=None, proxy_command=None): session = requests.Session() adapter = ProxiedHTTPAdapter( _proxy_gateway_func(gateway_host, gateway_username, gateway_private_key, host, port, proxy_command), host, port) session.mount('http://{0}:{1}'.format(host, port), adapter) return session def _simple_exec_func(cmd): def func(): return e_subprocess.Popen(cmd, stdin=e_subprocess.PIPE, stdout=e_subprocess.PIPE, stderr=e_subprocess.PIPE) return func def _proxy_gateway_func(gateway_host, gateway_username, gateway_private_key, host, port, proxy_command): def func(): proc = procutils.start_subprocess() try: conn_params = (gateway_host, gateway_username, gateway_private_key, proxy_command, None, None) procutils.run_in_subprocess(proc, _connect, conn_params) cmd = "nc {host} {port}".format(host=host, port=port) procutils.run_in_subprocess( proc, _execute_command_interactive, (cmd,), interactive=True) return proc except Exception: with excutils.save_and_reraise_exception(): procutils.shutdown_subprocess(proc, _cleanup) return func class ProxiedHTTPAdapter(adapters.HTTPAdapter): def __init__(self, create_process_func, host, port): super(ProxiedHTTPAdapter, self).__init__() LOG.debug('HTTP adapter created for {host}:{port}'.format(host=host, port=port)) self.create_process_func = create_process_func self.port = port self.host = host def get_connection(self, url, proxies=None): pool_conn = ( super(ProxiedHTTPAdapter, self).get_connection(url, proxies)) if hasattr(pool_conn, '_get_conn'): http_conn = pool_conn._get_conn() if http_conn.sock is None: 
if hasattr(http_conn, 'connect'): sock = self._connect() LOG.debug('HTTP connection {connection} getting new ' 'netcat socket {socket}'.format( connection=http_conn, socket=sock)) http_conn.sock = sock else: if hasattr(http_conn.sock, 'is_netcat_socket'): LOG.debug('Pooled http connection has existing ' 'netcat socket. resetting pipe') http_conn.sock.reset() pool_conn._put_conn(http_conn) return pool_conn def close(self): LOG.debug('Closing HTTP adapter for {host}:{port}' .format(host=self.host, port=self.port)) super(ProxiedHTTPAdapter, self).close() def _connect(self): LOG.debug('Returning netcat socket for {host}:{port}' .format(host=self.host, port=self.port)) rootwrap_command = CONF.rootwrap_command if CONF.use_rootwrap else '' return NetcatSocket(self.create_process_func, rootwrap_command) class NetcatSocket(object): def _create_process(self): self.process = self.create_process_func() def __init__(self, create_process_func, rootwrap_command=None): self.create_process_func = create_process_func self.rootwrap_command = rootwrap_command self._create_process() def send(self, content): try: self.process.stdin.write(content) self.process.stdin.flush() except IOError as e: raise ex.SystemError(e) return len(content) def sendall(self, content): return self.send(content) def makefile(self, mode, *arg): if mode.startswith('r'): return self.process.stdout if mode.startswith('w'): return self.process.stdin raise ex.IncorrectStateError(_("Unknown file mode %s") % mode) def recv(self, size): try: return os.read(self.process.stdout.fileno(), size) except IOError as e: raise ex.SystemError(e) def _terminate(self): if self.rootwrap_command: os.system('{0} kill {1}'.format(self.rootwrap_command, # nosec self.process.pid)) else: self.process.terminate() def close(self): LOG.debug('Socket close called') self._terminate() def settimeout(self, timeout): pass def fileno(self): return self.process.stdin.fileno() def is_netcat_socket(self): return True def reset(self): self._terminate() self._create_process() class InstanceInteropHelper(remote.Remote): def __init__(self, instance): self.instance = instance def __enter__(self): _acquire_remote_semaphore() try: self.bulk = BulkInstanceInteropHelper(self.instance) return self.bulk except Exception: with excutils.save_and_reraise_exception(): _release_remote_semaphore() def __exit__(self, *exc_info): try: self.bulk.close() finally: _release_remote_semaphore() def get_neutron_info(self, instance=None): if not instance: instance = self.instance neutron_info = dict() neutron_info['network'] = instance.cluster.neutron_management_network ctx = context.current() neutron_info['token'] = context.get_auth_token() neutron_info['tenant'] = ctx.tenant_name neutron_info['host'] = _get_access_ip(instance) log_info = copy.deepcopy(neutron_info) del log_info['token'] LOG.debug('Returning neutron info: {info}'.format(info=log_info)) return neutron_info def _build_proxy_command(self, command, instance=None, port=None, info=None, rootwrap_command=None): # Accepted keywords in the proxy command template: # {host}, {port}, {tenant_id}, {network_id}, {router_id} keywords = {} if not info: info = self.get_neutron_info(instance) keywords['tenant_id'] = context.current().tenant_id keywords['network_id'] = info['network'] # Query Neutron only if needed if '{router_id}' in command: auth = trusts.get_os_admin_auth_plugin(instance.cluster) client = neutron.NeutronClient(info['network'], info['token'], info['tenant'], auth=auth) keywords['router_id'] = client.get_router() keywords['host'] = 
_get_access_ip(instance) keywords['port'] = port try: command = command.format(**keywords) except KeyError as e: LOG.error('Invalid keyword in proxy_command: {result}'.format( result=e)) # Do not give more details to the end-user raise ex.SystemError('Misconfiguration') if rootwrap_command: command = '{0} {1}'.format(rootwrap_command, command) return command def _get_conn_params(self): host_ng = self.instance.node_group cluster = host_ng.cluster access_instance = self.instance proxy_gateway_node = cluster.get_proxy_gateway_node() gateway_host = None gateway_image_username = None if proxy_gateway_node and not host_ng.is_proxy_gateway: # tmckay-fp in other words, if we are going to connect # through the proxy instead of the node we are actually # trying to reach # okay, the node group that supplies the proxy gateway # must have fps, but if a proxy is used the other # nodes are not required to have an fp. # so, this instance is assumed not to have a floating # ip and we are going to get to it through the proxy access_instance = proxy_gateway_node gateway_host = proxy_gateway_node.management_ip ng = proxy_gateway_node.node_group gateway_image_username = ng.image_username proxy_command = None if CONF.proxy_command: # Build a session through a user-defined socket proxy_command = CONF.proxy_command # tmckay-fp we have the node_group for the instance right here # okay, this test here whether access_instance.management_ip is an # fp -- just compare to internal? # in the neutron case, we check the node group for the # access_instance and look for fp elif CONF.use_namespaces and not net_utils.has_floating_ip( access_instance): # Build a session through a netcat socket in the Neutron namespace proxy_command = ( 'ip netns exec qrouter-{router_id} nc {host} {port}') # proxy_command is currently a template, turn it into a real command # i.e. dereference {host}, {port}, etc. 
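# Editor's note: the sketch below is illustrative only and is not part of
# the original module. It shows, with hypothetical values, how the template
# mentioned above is dereferenced -- _build_proxy_command() ultimately
# substitutes the placeholders with str.format().
def _example_dereference_proxy_template():
    template = 'ip netns exec qrouter-{router_id} nc {host} {port}'
    keywords = {'router_id': 'a1b2-example', 'host': '192.0.2.10',
                'port': 22}
    # Result: 'ip netns exec qrouter-a1b2-example nc 192.0.2.10 22'
    return template.format(**keywords)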
if proxy_command: rootwrap = CONF.rootwrap_command if CONF.use_rootwrap else '' proxy_command = self._build_proxy_command( proxy_command, instance=access_instance, port=22, info=None, rootwrap_command=rootwrap) host_ip = _get_access_ip(self.instance) return (host_ip, host_ng.image_username, cluster.management_private_key, proxy_command, gateway_host, gateway_image_username) def _run(self, func, *args, **kwargs): proc = procutils.start_subprocess() try: procutils.run_in_subprocess(proc, _connect, self._get_conn_params()) return procutils.run_in_subprocess(proc, func, args, kwargs) except Exception: with excutils.save_and_reraise_exception(): procutils.shutdown_subprocess(proc, _cleanup) finally: procutils.shutdown_subprocess(proc, _cleanup) def _run_with_log(self, func, timeout, description, *args, **kwargs): start_time = time.time() try: with e_timeout.Timeout(timeout, ex.TimeoutException(timeout, op_name=description)): return self._run(func, *args, **kwargs) finally: self._log_command('"%s" took %.1f seconds to complete' % ( description, time.time() - start_time)) def _run_s(self, func, timeout, description, *args, **kwargs): timeout = _get_ssh_timeout(func, timeout) _acquire_remote_semaphore() try: return self._run_with_log(func, timeout, description, *args, **kwargs) finally: _release_remote_semaphore() def get_http_client(self, port, info=None): self._log_command('Retrieving HTTP session for {0}:{1}'.format( _get_access_ip(self.instance), port)) host_ng = self.instance.node_group cluster = host_ng.cluster access_instance = self.instance access_port = port proxy_gateway_node = cluster.get_proxy_gateway_node() gateway_host = None gateway_username = None gateway_private_key = None if proxy_gateway_node and not host_ng.is_proxy_gateway: access_instance = proxy_gateway_node access_port = 22 gateway_host = proxy_gateway_node.management_ip gateway_username = proxy_gateway_node.node_group.image_username gateway_private_key = cluster.management_private_key proxy_command = None if CONF.proxy_command: # Build a session through a user-defined socket proxy_command = CONF.proxy_command # tmckay-fp again we can check the node group for the instance # what are the implications for nova here? None. # This is a test on whether access_instance has a floating_ip # in the neutron case, we check the node group for the # access_instance and look for fp elif (CONF.use_namespaces and not net_utils.has_floating_ip( access_instance)): # need neutron info if not info: info = self.get_neutron_info(access_instance) # Build a session through a netcat socket in the Neutron namespace proxy_command = ( 'ip netns exec qrouter-{router_id} nc {host} {port}') # proxy_command is currently a template, turn it into a real command # i.e. dereference {host}, {port}, etc. 
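# Editor's note: illustrative sketch only, not part of the original module.
# The session returned by get_http_client() is a plain requests.Session with
# the appropriate adapter (direct, proxied or ssh-gatewayed) already mounted
# for this host:port pair, so callers simply issue normal HTTP requests.
# The port and URL path below are hypothetical examples.
def _example_use_http_client(instance_helper, host, port=8088):
    session = instance_helper.get_http_client(port)
    return session.get('http://%s:%s/ws/v1/cluster/info' % (host, port))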
if proxy_command: rootwrap = CONF.rootwrap_command if CONF.use_rootwrap else '' proxy_command = self._build_proxy_command( proxy_command, instance=access_instance, port=access_port, info=info, rootwrap_command=rootwrap) return _get_http_client(_get_access_ip(self.instance), port, proxy_command, gateway_host, gateway_username, gateway_private_key) def close_http_session(self, port): global _sessions host = _get_access_ip(self.instance) self._log_command(_("Closing HTTP session for %(host)s:%(port)s") % { 'host': host, 'port': port}) session = _sessions.get((host, port), None) if session is None: raise ex.NotFoundException( {'host': host, 'port': port}, _('Session for %(host)s:%(port)s not cached')) session.close() del _sessions[(host, port)] def execute_command(self, cmd, run_as_root=False, get_stderr=False, raise_when_error=True, timeout=None): description = _('Executing "%s"') % cmd self._log_command(description) return self._run_s(_execute_command, timeout, description, cmd, run_as_root, get_stderr, raise_when_error) def write_file_to(self, remote_file, data, run_as_root=False, timeout=None): description = _('Writing file "%s"') % remote_file self._log_command(description) self._run_s(_write_file_to, timeout, description, remote_file, data, run_as_root) def write_files_to(self, files, run_as_root=False, timeout=None): description = _('Writing files "%s"') % list(files) self._log_command(description) self._run_s(_write_files_to, timeout, description, files, run_as_root) def append_to_file(self, r_file, data, run_as_root=False, timeout=None): description = _('Appending to file "%s"') % r_file self._log_command(description) self._run_s(_append_to_file, timeout, description, r_file, data, run_as_root) def append_to_files(self, files, run_as_root=False, timeout=None): description = _('Appending to files "%s"') % list(files) self._log_command(description) self._run_s(_append_to_files, timeout, description, files, run_as_root) def prepend_to_file(self, r_file, data, run_as_root=False, timeout=None): description = _('Prepending to file "%s"') % r_file self._log_command(description) self._run_s(_prepend_to_file, timeout, description, r_file, data, run_as_root) def read_file_from(self, remote_file, run_as_root=False, timeout=None): description = _('Reading file "%s"') % remote_file self._log_command(description) return self._run_s(_read_file_from, timeout, description, remote_file, run_as_root) def get_python_version(self, timeout=None): return self._run_s( _get_python_to_execute, timeout, "get_python_version") def get_os_distrib(self, timeout=None): return self._run_s(_get_os_distrib, timeout, "get_os_distrib") def get_os_version(self, timeout=None): return self._run_s(_get_os_version, timeout, "get_os_version") def install_packages(self, packages, timeout=None): description = _('Installing packages "%s"') % list(packages) self._log_command(description) self._run_s(_install_packages, timeout, description, packages) def update_repository(self, timeout=None): description = _('Updating repository') self._log_command(description) self._run_s(_update_repository, timeout, description) def replace_remote_string(self, remote_file, old_str, new_str, timeout=None): description = _('In file "%(file)s" replacing string ' '"%(old_string)s" with "%(new_string)s"') % { "file": remote_file, "old_string": old_str, "new_string": new_str} self._log_command(description) self._run_s(_replace_remote_string, timeout, description, remote_file, old_str, new_str) def replace_remote_line(self, remote_file, 
old_line_with_start_string, new_line, timeout=None): description = _('In file "%(file)s" replacing line' ' beginning with string ' '"%(old_line_with_start_string)s"' ' with "%(new_line)s"') % { "file": remote_file, "old_line_with_start_string": old_line_with_start_string, "new_line": new_line} self._log_command(description) self._run_s(_replace_remote_line, timeout, description, remote_file, old_line_with_start_string, new_line) def execute_on_vm_interactive(self, cmd, matcher, timeout=None): """Runs given command and responds to prompts. 'cmd' is a command to execute. 'matcher' is an object which provides responses on command's prompts. It should have two methods implemented: * get_response(buf) - returns response on prompt if it is found in 'buf' string, which is a part of command output. If no prompt is found, the method should return None. * is_eof(buf) - returns True if current 'buf' indicates that the command is finished. False should be returned otherwise. """ description = _('Executing interactively "%s"') % cmd self._log_command(description) self._run_s(_execute_on_vm_interactive, timeout, description, cmd, matcher) def _log_command(self, str): with context.set_current_instance_id(self.instance.instance_id): LOG.debug(str) class BulkInstanceInteropHelper(InstanceInteropHelper): def __init__(self, instance): super(BulkInstanceInteropHelper, self).__init__(instance) self.proc = procutils.start_subprocess() try: procutils.run_in_subprocess(self.proc, _connect, self._get_conn_params()) except Exception: with excutils.save_and_reraise_exception(): procutils.shutdown_subprocess(self.proc, _cleanup) def close(self): procutils.shutdown_subprocess(self.proc, _cleanup) def _run(self, func, *args, **kwargs): return procutils.run_in_subprocess(self.proc, func, args, kwargs) def _run_s(self, func, timeout, description, *args, **kwargs): timeout = _get_ssh_timeout(func, timeout) return self._run_with_log(func, timeout, description, *args, **kwargs) class SshRemoteDriver(remote.RemoteDriver): def get_type_and_version(self): return "ssh.1.0" def setup_remote(self, engine): global _global_remote_semaphore global INFRA _global_remote_semaphore = semaphore.Semaphore( CONF.global_remote_threshold) INFRA = engine def get_remote(self, instance): return InstanceInteropHelper(instance) def get_userdata_template(self): # SSH does not need any instance customization return "" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/tempfiles.py0000664000175000017500000000220200000000000020015 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
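# Editor's note: illustrative sketch only, not part of the original tree.
# The 'matcher' protocol documented in
# ssh_remote.InstanceInteropHelper.execute_on_vm_interactive() above consists
# of just two methods, get_response(buf) and is_eof(buf). A minimal matcher
# answering a hypothetical confirmation prompt could look like this:
class ExampleYesMatcher(object):
    def get_response(self, buf):
        # Answer 'y' whenever the (hypothetical) prompt appears in the
        # accumulated output; return None if no prompt was found.
        if buf.rstrip().endswith('[y/n]:'):
            return 'y'
        return None

    def is_eof(self, buf):
        # Treat a trailing shell prompt as the end of the command.
        return buf.rstrip().endswith('$')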
import contextlib import shutil import tempfile from sahara import exceptions as ex from sahara.i18n import _ @contextlib.contextmanager def tempdir(**kwargs): argdict = kwargs.copy() if 'dir' not in argdict: argdict['dir'] = '/tmp/' tmpdir = tempfile.mkdtemp(**argdict) try: yield tmpdir finally: try: shutil.rmtree(tmpdir) except OSError as e: raise ex.SystemError( _("Failed to delete temp dir %(dir)s (reason: %(reason)s)") % {'dir': tmpdir, 'reason': e}) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/types.py0000664000175000017500000000530600000000000017201 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from sahara import exceptions as ex class FrozenList(list): def append(self, p_object): raise ex.FrozenClassError(self) def extend(self, iterable): raise ex.FrozenClassError(self) def insert(self, index, p_object): raise ex.FrozenClassError(self) def pop(self, index=None): raise ex.FrozenClassError(self) def remove(self, value): raise ex.FrozenClassError(self) def reverse(self): raise ex.FrozenClassError(self) def sort(self, cmp=None, key=None, reverse=False): raise ex.FrozenClassError(self) def __add__(self, y): raise ex.FrozenClassError(self) def __delitem__(self, y): raise ex.FrozenClassError(self) def __delslice__(self, i, j): raise ex.FrozenClassError(self) def __iadd__(self, y): raise ex.FrozenClassError(self) def __imul__(self, y): raise ex.FrozenClassError(self) def __setitem__(self, i, y): raise ex.FrozenClassError(self) def __setslice__(self, i, j, y): raise ex.FrozenClassError(self) class FrozenDict(dict): def clear(self): raise ex.FrozenClassError(self) def pop(self, k, d=None, force=False): if force: return super(FrozenDict, self).pop(k, d) raise ex.FrozenClassError(self) def popitem(self): raise ex.FrozenClassError(self) def setdefault(self, k, d=None): raise ex.FrozenClassError(self) def update(self, E=None, **F): raise ex.FrozenClassError(self) def __delitem__(self, y): raise ex.FrozenClassError(self) def __setitem__(self, i, y): raise ex.FrozenClassError(self) def is_int(s): try: int(s) return True except Exception: return False def transform_to_num(s): # s can be a string or non-string. try: return int(str(s)) except ValueError: try: return float(str(s)) except ValueError: return s class Page(list): def __init__(self, l, prev=None, next=None): super(Page, self).__init__(l) self.prev = prev self.next = next ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/wsgi.py0000664000175000017500000000502600000000000017005 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Only (de)serialization utils hasn't been removed to decrease requirements # number. """Utility methods for working with WSGI servers.""" import datetime from oslo_serialization import jsonutils import six from sahara import exceptions from sahara.i18n import _ class ActionDispatcher(object): """Maps method name to local methods through action name.""" def dispatch(self, *args, **kwargs): """Find and call local method.""" action = kwargs.pop('action', 'default') action_method = getattr(self, str(action), self.default) return action_method(*args, **kwargs) def default(self, data): raise NotImplementedError() class DictSerializer(ActionDispatcher): """Default request body serialization.""" def serialize(self, data, action='default'): return self.dispatch(data, action=action) def default(self, data): return "" class JSONDictSerializer(DictSerializer): """Default JSON request body serialization.""" def default(self, data): def sanitizer(obj): if isinstance(obj, datetime.datetime): _dtime = obj - datetime.timedelta(microseconds=obj.microsecond) return _dtime.isoformat() return six.text_type(obj) return jsonutils.dumps(data, default=sanitizer) class TextDeserializer(ActionDispatcher): """Default request body deserialization.""" def deserialize(self, datastring, action='default'): return self.dispatch(datastring, action=action) def default(self, datastring): return {} class JSONDeserializer(TextDeserializer): def _from_json(self, datastring): try: return jsonutils.loads(datastring) except ValueError: msg = _("cannot understand JSON") raise exceptions.MalformedRequestBody(msg) def default(self, datastring): return {'body': self._from_json(datastring)} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/utils/xmlutils.py0000664000175000017500000001356300000000000017722 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
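# Editor's note: illustrative sketch only, not part of the original tree.
# A quick round trip through the (de)serializers defined in
# sahara/utils/wsgi.py above: the sanitizer drops microseconds from datetimes
# before JSON encoding, and JSONDeserializer wraps the parsed result under a
# 'body' key. The payload below is a made-up example.
def _example_wsgi_roundtrip():
    import datetime
    from sahara.utils import wsgi

    body = wsgi.JSONDictSerializer().serialize(
        {'name': 'cluster-1',
         'created_at': datetime.datetime(2022, 3, 30, 12, 0, 0, 123456)})
    # deserialize() returns the parsed JSON under a 'body' key; the datetime
    # comes back as the string '2022-03-30T12:00:00'.
    return wsgi.JSONDeserializer().deserialize(body)['body']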
import re import xml.dom.minidom as xml import pkg_resources as pkg # hadoop.xml related utils def load_hadoop_xml_defaults(file_name, package='sahara'): doc = load_xml_document(file_name, package=package) configs = [] prop = doc.getElementsByTagName('property') for elements in prop: configs.append({ "name": get_text_from_node(elements, 'name'), "value": _adjust_field(get_text_from_node(elements, 'value')), "description": _adjust_field( get_text_from_node(elements, 'description')) }) return configs def parse_hadoop_xml_with_name_and_value(data): doc = xml.parseString(data) configs = [] prop = doc.getElementsByTagName('property') for elements in prop: configs.append({ 'name': get_text_from_node(elements, 'name'), 'value': get_text_from_node(elements, 'value') }) return configs def _get_node_element(element, name): element = element.getElementsByTagName(name) return element[0] if element and element[0].hasChildNodes() else None def create_hadoop_xml(configs, config_filter=None): doc = xml.Document() pi = doc.createProcessingInstruction('xml-stylesheet', 'type="text/xsl" ' 'href="configuration.xsl"') doc.insertBefore(pi, doc.firstChild) # Create the base element configuration = doc.createElement('configuration') doc.appendChild(configuration) default_configs = [] if config_filter is not None: default_configs = [cfg['name'] for cfg in config_filter] for name in sorted(configs): if name in default_configs or config_filter is None: add_property_to_configuration(doc, name, configs[name]) # Return newly created XML return doc.toprettyxml(indent=" ") def create_elements_xml(configs): doc = xml.Document() text = '' for name in sorted(configs): element = doc.createElement('property') add_text_element_to_element(doc, element, 'name', name) add_text_element_to_element(doc, element, 'value', configs[name]) text += element.toprettyxml(indent=" ") return text # basic utils def load_xml_document(file_name, strip=False, package='sahara'): fname = pkg.resource_filename(package, file_name) if strip: with open(fname, "r") as f: doc = "".join(line.strip() for line in f) return xml.parseString(doc) else: return xml.parse(fname) def get_text_from_node(element, name): element = element.getElementsByTagName(name) if element else None return element[0].firstChild.nodeValue if ( element and element[0].hasChildNodes()) else '' def _adjust_field(text): return re.sub(r"\n *|\t", "", str(text)) def add_property_to_configuration(doc, name, value): prop = add_child(doc, 'configuration', 'property') add_text_element_to_element(doc, prop, 'name', name) add_text_element_to_element(doc, prop, 'value', value) def add_properties_to_configuration(doc, parent_for_conf, configs): get_and_create_if_not_exist(doc, parent_for_conf, 'configuration') for n in sorted(filter(lambda x: x, configs)): add_property_to_configuration(doc, n, configs[n]) def add_child(doc, parent, tag_to_add): actions = doc.getElementsByTagName(parent) actions[0].appendChild(doc.createElement(tag_to_add)) return actions[0].lastChild def add_element(doc, parent, element): actions = doc.getElementsByTagName(parent) actions[0].appendChild(element) return actions[0].lastChild def get_and_create_if_not_exist(doc, parent, element): prop = doc.getElementsByTagName(element) if len(prop) != 0: return prop[0] return add_child(doc, parent, element) def add_text_element_to_tag(doc, parent_tag, element, value): prop = add_child(doc, parent_tag, element) prop.appendChild(doc.createTextNode(str(value))) def add_text_element_to_element(doc, parent, element, value): 
parent.appendChild(doc.createElement(element)) try: parent.lastChild.appendChild(doc.createTextNode(str(value))) except UnicodeEncodeError: parent.lastChild.appendChild(doc.createTextNode( str(value.encode('utf8')))) def add_equal_separated_dict(doc, parent_tag, each_elem_tag, value): for k in sorted(filter(lambda x: x, value)): if k: add_text_element_to_tag(doc, parent_tag, each_elem_tag, "%s=%s" % (k, value[k])) def add_attributes_to_element(doc, tag, attributes): element = doc.getElementsByTagName(tag)[0] for name, value in attributes.items(): element.setAttribute(name, value) def add_tagged_list(doc, parent_tag, each_elem_tag, values): for v in values: add_text_element_to_tag(doc, parent_tag, each_elem_tag, v) def get_property_dict(elem): res = {} properties = elem.getElementsByTagName('property') for prop in properties: k = get_text_from_node(prop, 'name') v = get_text_from_node(prop, 'value') res[k] = v return res def get_param_dict(elem): res = {} params = elem.getElementsByTagName('param') for param in params: k, v = param.firstChild.nodeValue.split('=') res[k] = v return res ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/sahara/version.py0000664000175000017500000000121500000000000016355 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from pbr import version version_info = version.VersionInfo('sahara') ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.685891 sahara-16.0.0/sahara.egg-info/0000775000175000017500000000000000000000000016011 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641486.0 sahara-16.0.0/sahara.egg-info/PKG-INFO0000664000175000017500000000411000000000000017102 0ustar00zuulzuul00000000000000Metadata-Version: 1.2 Name: sahara Version: 16.0.0 Summary: Sahara project Home-page: https://docs.openstack.org/sahara/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: Apache Software License Description: ======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/sahara.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. 
Change things from this point on OpenStack Data Processing ("Sahara") project ============================================ Sahara at wiki.openstack.org: https://wiki.openstack.org/wiki/Sahara Storyboard project: https://storyboard.openstack.org/#!/project/935 Sahara docs site: https://docs.openstack.org/sahara/latest/ Roadmap: https://wiki.openstack.org/wiki/Sahara/Roadmap Quickstart guide: https://docs.openstack.org/sahara/latest/user/quickstart.html How to participate: https://docs.openstack.org/sahara/latest/contributor/how-to-participate.html Source: https://opendev.org/openstack/sahara Bugs and feature requests: https://storyboard.openstack.org/#!/project/935 Release notes: https://docs.openstack.org/releasenotes/sahara/ License ------- Apache License Version 2.0 http://www.apache.org/licenses/LICENSE-2.0 Platform: UNKNOWN Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Requires-Python: >=3.6 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641486.0 sahara-16.0.0/sahara.egg-info/SOURCES.txt0000664000175000017500000013422000000000000017677 0ustar00zuulzuul00000000000000.coveragerc .stestr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst bandit.yaml bindep.txt lower-constraints.txt pylintrc requirements.txt setup.cfg setup.py test-requirements.txt tox.ini api-ref/source/conf.py api-ref/source/index.rst api-ref/source/v1.1/cluster-templates.inc api-ref/source/v1.1/clusters.inc api-ref/source/v1.1/data-sources.inc api-ref/source/v1.1/event-log.inc api-ref/source/v1.1/image-registry.inc api-ref/source/v1.1/index.rst api-ref/source/v1.1/job-binaries.inc api-ref/source/v1.1/job-binary-internals.inc api-ref/source/v1.1/job-executions.inc api-ref/source/v1.1/job-types.inc api-ref/source/v1.1/jobs.inc api-ref/source/v1.1/node-group-templates.inc api-ref/source/v1.1/parameters.yaml api-ref/source/v1.1/plugins.inc api-ref/source/v1.1/samples/cluster-templates/cluster-template-create-request.json api-ref/source/v1.1/samples/cluster-templates/cluster-template-create-response.json api-ref/source/v1.1/samples/cluster-templates/cluster-template-show-response.json api-ref/source/v1.1/samples/cluster-templates/cluster-template-update-request.json api-ref/source/v1.1/samples/cluster-templates/cluster-template-update-response.json api-ref/source/v1.1/samples/cluster-templates/cluster-templates-list-response.json api-ref/source/v1.1/samples/clusters/cluster-create-request.json api-ref/source/v1.1/samples/clusters/cluster-create-response.json api-ref/source/v1.1/samples/clusters/cluster-scale-request.json api-ref/source/v1.1/samples/clusters/cluster-scale-response.json api-ref/source/v1.1/samples/clusters/cluster-show-response.json api-ref/source/v1.1/samples/clusters/cluster-update-request.json api-ref/source/v1.1/samples/clusters/cluster-update-response.json api-ref/source/v1.1/samples/clusters/clusters-list-response.json api-ref/source/v1.1/samples/clusters/multiple-clusters-create-request.json api-ref/source/v1.1/samples/clusters/multiple-clusters-create-response.json 
api-ref/source/v1.1/samples/data-sources/data-source-register-hdfs-request.json api-ref/source/v1.1/samples/data-sources/data-source-register-hdfs-response.json api-ref/source/v1.1/samples/data-sources/data-source-register-swift-request.json api-ref/source/v1.1/samples/data-sources/data-source-register-swift-response.json api-ref/source/v1.1/samples/data-sources/data-source-show-response.json api-ref/source/v1.1/samples/data-sources/data-source-update-request.json api-ref/source/v1.1/samples/data-sources/data-source-update-response.json api-ref/source/v1.1/samples/data-sources/data-sources-list-response.json api-ref/source/v1.1/samples/event-log/cluster-progress-response.json api-ref/source/v1.1/samples/image-registry/image-register-request.json api-ref/source/v1.1/samples/image-registry/image-register-response.json api-ref/source/v1.1/samples/image-registry/image-show-response.json api-ref/source/v1.1/samples/image-registry/image-tags-add-request.json api-ref/source/v1.1/samples/image-registry/image-tags-add-response.json api-ref/source/v1.1/samples/image-registry/image-tags-delete-request.json api-ref/source/v1.1/samples/image-registry/image-tags-delete-response.json api-ref/source/v1.1/samples/image-registry/images-list-response.json api-ref/source/v1.1/samples/job-binaries/create-request.json api-ref/source/v1.1/samples/job-binaries/create-response.json api-ref/source/v1.1/samples/job-binaries/list-response.json api-ref/source/v1.1/samples/job-binaries/show-data-response api-ref/source/v1.1/samples/job-binaries/show-response.json api-ref/source/v1.1/samples/job-binaries/update-request.json api-ref/source/v1.1/samples/job-binaries/update-response.json api-ref/source/v1.1/samples/job-binary-internals/create-response.json api-ref/source/v1.1/samples/job-binary-internals/list-response.json api-ref/source/v1.1/samples/job-binary-internals/show-data-response api-ref/source/v1.1/samples/job-binary-internals/show-response.json api-ref/source/v1.1/samples/job-binary-internals/update-request.json api-ref/source/v1.1/samples/job-binary-internals/update-response.json api-ref/source/v1.1/samples/job-executions/cancel-response.json api-ref/source/v1.1/samples/job-executions/job-ex-response.json api-ref/source/v1.1/samples/job-executions/job-ex-update-request.json api-ref/source/v1.1/samples/job-executions/job-ex-update-response.json api-ref/source/v1.1/samples/job-executions/list-response.json api-ref/source/v1.1/samples/job-types/job-types-list-response.json api-ref/source/v1.1/samples/jobs/job-create-request.json api-ref/source/v1.1/samples/jobs/job-create-response.json api-ref/source/v1.1/samples/jobs/job-execute-request.json api-ref/source/v1.1/samples/jobs/job-execute-response.json api-ref/source/v1.1/samples/jobs/job-show-response.json api-ref/source/v1.1/samples/jobs/job-update-request.json api-ref/source/v1.1/samples/jobs/job-update-response.json api-ref/source/v1.1/samples/jobs/jobs-list-response.json api-ref/source/v1.1/samples/node-group-templates/node-group-template-create-request.json api-ref/source/v1.1/samples/node-group-templates/node-group-template-create-response.json api-ref/source/v1.1/samples/node-group-templates/node-group-template-show-response.json api-ref/source/v1.1/samples/node-group-templates/node-group-template-update-request.json api-ref/source/v1.1/samples/node-group-templates/node-group-template-update-response.json api-ref/source/v1.1/samples/node-group-templates/node-group-templates-list-response.json api-ref/source/v1.1/samples/plugins/plugin-show-response.json 
api-ref/source/v1.1/samples/plugins/plugin-update-request.json api-ref/source/v1.1/samples/plugins/plugin-update-response.json api-ref/source/v1.1/samples/plugins/plugin-version-show-response.json api-ref/source/v1.1/samples/plugins/plugins-list-response.json api-ref/source/v2/cluster-templates.inc api-ref/source/v2/clusters.inc api-ref/source/v2/data-sources.inc api-ref/source/v2/event-log.inc api-ref/source/v2/image-registry.inc api-ref/source/v2/index.rst api-ref/source/v2/job-binaries.inc api-ref/source/v2/job-templates.inc api-ref/source/v2/job-types.inc api-ref/source/v2/jobs.inc api-ref/source/v2/node-group-templates.inc api-ref/source/v2/parameters.yaml api-ref/source/v2/plugins.inc api-ref/source/v2/samples/cluster-templates/cluster-template-create-request.json api-ref/source/v2/samples/cluster-templates/cluster-template-create-response.json api-ref/source/v2/samples/cluster-templates/cluster-template-show-response.json api-ref/source/v2/samples/cluster-templates/cluster-template-update-request.json api-ref/source/v2/samples/cluster-templates/cluster-template-update-response.json api-ref/source/v2/samples/cluster-templates/cluster-templates-list-response.json api-ref/source/v2/samples/clusters/cluster-create-request.json api-ref/source/v2/samples/clusters/cluster-create-response.json api-ref/source/v2/samples/clusters/cluster-scale-request.json api-ref/source/v2/samples/clusters/cluster-scale-response.json api-ref/source/v2/samples/clusters/cluster-show-response.json api-ref/source/v2/samples/clusters/cluster-update-request.json api-ref/source/v2/samples/clusters/cluster-update-response.json api-ref/source/v2/samples/clusters/clusters-list-response.json api-ref/source/v2/samples/clusters/multiple-clusters-create-request.json api-ref/source/v2/samples/clusters/multiple-clusters-create-response.json api-ref/source/v2/samples/data-sources/data-source-register-hdfs-request.json api-ref/source/v2/samples/data-sources/data-source-register-hdfs-response.json api-ref/source/v2/samples/data-sources/data-source-register-swift-request.json api-ref/source/v2/samples/data-sources/data-source-register-swift-response.json api-ref/source/v2/samples/data-sources/data-source-show-response.json api-ref/source/v2/samples/data-sources/data-source-update-request.json api-ref/source/v2/samples/data-sources/data-source-update-response.json api-ref/source/v2/samples/data-sources/data-sources-list-response.json api-ref/source/v2/samples/event-log/cluster-progress-response.json api-ref/source/v2/samples/image-registry/image-register-request.json api-ref/source/v2/samples/image-registry/image-register-response.json api-ref/source/v2/samples/image-registry/image-show-response.json api-ref/source/v2/samples/image-registry/image-tags-add-request.json api-ref/source/v2/samples/image-registry/image-tags-add-response.json api-ref/source/v2/samples/image-registry/image-tags-delete-request.json api-ref/source/v2/samples/image-registry/image-tags-delete-response.json api-ref/source/v2/samples/image-registry/images-list-response.json api-ref/source/v2/samples/job-binaries/create-request.json api-ref/source/v2/samples/job-binaries/create-response.json api-ref/source/v2/samples/job-binaries/list-response.json api-ref/source/v2/samples/job-binaries/show-data-response api-ref/source/v2/samples/job-binaries/show-response.json api-ref/source/v2/samples/job-binaries/update-request.json api-ref/source/v2/samples/job-binaries/update-response.json api-ref/source/v2/samples/job-templates/job-template-create-request.json 
api-ref/source/v2/samples/job-templates/job-template-create-response.json api-ref/source/v2/samples/job-templates/job-template-show-response.json api-ref/source/v2/samples/job-templates/job-template-update-request.json api-ref/source/v2/samples/job-templates/job-template-update-response.json api-ref/source/v2/samples/job-templates/job-templates-list-response.json api-ref/source/v2/samples/job-types/job-types-list-response.json api-ref/source/v2/samples/jobs/cancel-response.json api-ref/source/v2/samples/jobs/job-request.json api-ref/source/v2/samples/jobs/job-response.json api-ref/source/v2/samples/jobs/job-update-request.json api-ref/source/v2/samples/jobs/job-update-response.json api-ref/source/v2/samples/jobs/list-response.json api-ref/source/v2/samples/node-group-templates/node-group-template-create-request.json api-ref/source/v2/samples/node-group-templates/node-group-template-create-response.json api-ref/source/v2/samples/node-group-templates/node-group-template-show-response.json api-ref/source/v2/samples/node-group-templates/node-group-template-update-request.json api-ref/source/v2/samples/node-group-templates/node-group-template-update-response.json api-ref/source/v2/samples/node-group-templates/node-group-templates-list-response.json api-ref/source/v2/samples/plugins/plugin-show-response.json api-ref/source/v2/samples/plugins/plugin-update-request.json api-ref/source/v2/samples/plugins/plugin-update-response.json api-ref/source/v2/samples/plugins/plugin-version-show-response.json api-ref/source/v2/samples/plugins/plugins-list-response.json devstack/README.rst devstack/exercise.sh devstack/plugin.sh devstack/settings devstack/files/apache-sahara-api.template devstack/upgrade/resources.sh devstack/upgrade/settings devstack/upgrade/shutdown.sh devstack/upgrade/upgrade.sh devstack/upgrade/from-liberty/upgrade-sahara devstack/upgrade/from-mitaka/upgrade-sahara devstack/upgrade/from-rocky/upgrade-sahara doc/requirements.txt doc/source/conf.py doc/source/config-generator.conf doc/source/index.rst doc/source/_extra/.htaccess doc/source/_templates/sidebarlinks.html doc/source/_theme_rtd/layout.html doc/source/_theme_rtd/theme.conf doc/source/admin/advanced-configuration-guide.rst doc/source/admin/configs-recommendations.rst doc/source/admin/configuration-guide.rst doc/source/admin/index.rst doc/source/admin/upgrade-guide.rst doc/source/cli/index.rst doc/source/cli/sahara-status.rst doc/source/configuration/descriptionconfig.rst doc/source/configuration/index.rst doc/source/configuration/sampleconfig.rst doc/source/contributor/adding-database-migrations.rst doc/source/contributor/apiv2.rst doc/source/contributor/contributing.rst doc/source/contributor/dashboard-dev-environment-guide.rst doc/source/contributor/development-environment.rst doc/source/contributor/development-guidelines.rst doc/source/contributor/devstack.rst doc/source/contributor/gerrit.rst doc/source/contributor/how-to-build-oozie.rst doc/source/contributor/image-gen.rst doc/source/contributor/index.rst doc/source/contributor/jenkins.rst doc/source/contributor/log-guidelines.rst doc/source/contributor/testing.rst doc/source/images/hadoop-cluster-example.jpg doc/source/images/openstack-interop.png doc/source/images/sahara-architecture.svg doc/source/install/dashboard-guide.rst doc/source/install/index.rst doc/source/install/installation-guide.rst doc/source/intro/architecture.rst doc/source/intro/index.rst doc/source/intro/overview.rst doc/source/reference/edp-spi.rst doc/source/reference/index.rst 
doc/source/reference/plugin-spi.rst doc/source/reference/plugins.rst doc/source/reference/restapi.rst doc/source/user/building-guest-images.rst doc/source/user/dashboard-user-guide.rst doc/source/user/edp-s3.rst doc/source/user/edp.rst doc/source/user/features.rst doc/source/user/hadoop-swift.rst doc/source/user/index.rst doc/source/user/overview.rst doc/source/user/plugins.rst doc/source/user/quickstart.rst doc/source/user/registering-image.rst doc/source/user/sahara-on-ironic.rst doc/source/user/statuses.rst doc/source/user/building-guest-images/baremetal.rst doc/source/user/building-guest-images/sahara-image-create.rst doc/source/user/building-guest-images/sahara-image-pack.rst doc/test/redirect-tests.txt etc/edp-examples/README.rst etc/sahara/README-sahara.conf.txt etc/sahara/api-paste.ini etc/sahara/compute.topology.sample etc/sahara/rootwrap.conf etc/sahara/swift.topology.sample etc/sahara/rootwrap.d/sahara.filters etc/sudoers.d/sahara-rootwrap playbooks/buildimages/run.yaml releasenotes/notes/.placeholder releasenotes/notes/add-impala-2.2-c1649599649aff5c.yaml releasenotes/notes/add-mapr-520-3ed6cd0ae9688e17.yaml releasenotes/notes/add-mapr-kafka-3a808bbc1aa21055.yaml releasenotes/notes/add-mapr-sentry-6012c08b55d679de.yaml releasenotes/notes/add-scheduler-edp-job-9eda17dd174e53fa.yaml releasenotes/notes/add-storm-version-1_1_0-3e10b34824706a62.yaml releasenotes/notes/add-upgrade-check-framework-9cd18dbc47b0efbd.yaml releasenotes/notes/add-wsgi-server-support-c8fbc3d76d4e42f6.yaml releasenotes/notes/add_kafka_in_cdh-774c7c051480c892.yaml releasenotes/notes/add_mapr_repo_configs-04af1a67350bfd24.yaml releasenotes/notes/ambari-agent-pkg-install-timeout-param-d50e5c15e06fa51e.yaml releasenotes/notes/ambari-downscaling-b9ba759ce9c7325e.yaml releasenotes/notes/ambari-hive-92b911e0a759ee88.yaml releasenotes/notes/ambari-server-start-856403bc280dfba3.yaml releasenotes/notes/ambari26-image-pack-88c9aad59bf635b2.yaml releasenotes/notes/ambari_2_4_image_generation_validation-47eabb9fa90384c8.yaml releasenotes/notes/api-insecure-cbd4fd5da71b29a3.yaml releasenotes/notes/api-v2-return-payload-a84a609db410228a.yaml releasenotes/notes/apiv2-microversion-4c1a58ee8090e5a9.yaml releasenotes/notes/apiv2-payload-tweaks-b73c20a35263d958.yaml releasenotes/notes/apiv2-preview-release-b1ee8cc9b2fb01da.yaml releasenotes/notes/apiv2-stable-release-25ba9920c8e4632a.yaml releasenotes/notes/auto_configs_for_hdp-011d460d37dcdf02.yaml releasenotes/notes/boot-from-volume-e7078452fac1a4a0.yaml releasenotes/notes/ca-cert-fix-5c434a82f9347039.yaml releasenotes/notes/cdh-5-5-35e582e149a05632.yaml releasenotes/notes/cdh-513-bdce0d5d269d8f20.yaml releasenotes/notes/cdh-labels-5695d95bce226051.yaml releasenotes/notes/cdh_5_11_0_image_generation_validation-6334ef6d04950935.yaml releasenotes/notes/cdh_5_11_support-10d4abb91bc4475f.yaml releasenotes/notes/cdh_5_7_image_generation_validation-308e7529a9018663.yaml releasenotes/notes/cdh_5_7_support-9522cb9b4dce2378.yaml releasenotes/notes/cdh_5_9_0_image_generation_validation-19d10e6468e30b4f.yaml releasenotes/notes/cdh_5_9_support-b603a2648b2e7b32.yaml releasenotes/notes/config-groups-ambari-837de6d33eb0fa87.yaml releasenotes/notes/consolidate-cluster-creation-apiv2-5d5aceeb2e97c702.yaml releasenotes/notes/convert-to-cluster-template-43d502496d18625e.yaml releasenotes/notes/deprecate-cdh_5_5-0da56b562170566f.yaml releasenotes/notes/deprecate-hdp-a9ff0ecf6006da49.yaml releasenotes/notes/deprecate-json-formatted-policy-file-b267f288cba7e325.yaml 
releasenotes/notes/deprecate-mapr-51-090423438e3dda20.yaml releasenotes/notes/deprecate-plugin-vanilla260-46e4b8fe96e8fe68.yaml releasenotes/notes/deprecate-sahara-all-entry-point-1446a00dab643b7b.yaml releasenotes/notes/deprecate-spark-version-131-98eccc79b13b6b8f.yaml releasenotes/notes/deprecate-storm-version-092.yaml-b9ff2b9ebbb983fc.yaml releasenotes/notes/designate-integration-784c5f7f29546015.yaml releasenotes/notes/drop-py-2-7-bc282e43b26fbf17.yaml releasenotes/notes/enable-mutable-configuration-2dd6b7a0e0fe4437.yaml releasenotes/notes/engine-opt-258ff1ae9b04d628.yaml releasenotes/notes/enhance-bfv-12bac06c4438675f.yaml releasenotes/notes/event_log_for_hdp-a114511c477ef16d.yaml releasenotes/notes/fix-install-provision-events-c1bd2e05bf2be6bd.yaml releasenotes/notes/fixing-policy-inconsistencies-984020000cc3882a.yaml releasenotes/notes/force-delete-apiv2-e372392bbc8639f8.yaml releasenotes/notes/force-delete-changes-2e0881a99742c339.yaml releasenotes/notes/hadoop-swift-domain-fix-c1dfdf6c52b5aa25.yaml releasenotes/notes/hadoop-swift-jar-for-ambari-4439913b01d42468.yaml releasenotes/notes/hdfs-dfs-94a9c4f64cf8994f.yaml releasenotes/notes/hdp-removed-from-defaults-31d1e1f15973b682.yaml releasenotes/notes/hdp25-b35ef99c240fc127.yaml releasenotes/notes/hdp26-5a406d7066706bf1.yaml releasenotes/notes/healthcheck-02e429a3ffcd9482.yaml releasenotes/notes/honor-endpoint-type-neutron-4583128c383d9745.yaml releasenotes/notes/ironic-support-79e7ecad05f54029.yaml releasenotes/notes/kerberos-76dd297462b7337c.yaml releasenotes/notes/key_manager_integration-e32d141809c8cc46.yaml releasenotes/notes/keypair-replacement-0c0cc3db0551c112.yaml releasenotes/notes/keystoneclient-to-keystonauth-migration-c75988975ad1a506.yaml releasenotes/notes/mapr-health-check-2eba3d742a2b853f.yaml releasenotes/notes/mapr-labels-5cc318616db59403.yaml releasenotes/notes/mapr-remove-spark-standalone-293ca864de9a7848.yaml releasenotes/notes/mapr-services-new-versions-b32c2e8fe07d1600.yaml releasenotes/notes/mapr-services-new-versions-dc7652e33f26bbdc.yaml releasenotes/notes/mapr5.2.0-image-gen-c850e74977b00abe.yaml releasenotes/notes/neutron-default-a6baf93d857d86b3.yaml releasenotes/notes/nova-network-removal-debe306fd7c61268.yaml releasenotes/notes/novaclient_images_to_glanceclient-0266a2bd92b4be05.yaml releasenotes/notes/ntp-config-51ed9d612132e2fa.yaml releasenotes/notes/optional-project-id-apiv1-2e89756f6f16bd5e.yaml releasenotes/notes/options-to-oslo_messaging_notifications-cee206fc4f74c217.yaml releasenotes/notes/plugins-split-from-sahara-core-9ffc5e5d06c9239c.yaml releasenotes/notes/policy_in_code-5847902775ff9861.yaml releasenotes/notes/proxy-user-lowercase-f116f7b7e89274cb.yaml releasenotes/notes/rack_awareness_for_cdh-e0cd5d4ab46aa1b5.yaml releasenotes/notes/rack_awareness_for_hdp-6e3d44468cc141a5.yaml releasenotes/notes/refactor-floating-ips-logic-9d37d9297f3621b3.yaml releasenotes/notes/remove-cdh_5.0_5.3_5.4-b5f140e9b0233c07.yaml releasenotes/notes/remove-hard-coded-oozie-password-b97475c8772aa1bd.yaml releasenotes/notes/remove-hardcoded-password-from-hive-eb923b518974e853.yaml releasenotes/notes/remove-hdp-137d0ad3d2389b7a.yaml releasenotes/notes/remove-mapr-500-3df3041be99a864c.yaml releasenotes/notes/remove-spark-100-44f3d5efc3806410.yaml releasenotes/notes/remove-upload-oozie-sharelib-step-in-vanilla-2.8.2-546b2026e2f5d557.yaml releasenotes/notes/remove-use-neutron-2499b661dce041d4.yaml releasenotes/notes/remove_custom_auth_domainname-984fd2d931e306cc.yaml 
releasenotes/notes/remove_enable_notifications_opt-4c0d46e8e79eb06f.yaml releasenotes/notes/s3-datasource-protocol-d3abd0b22f653b3b.yaml releasenotes/notes/sahara-cfg-location-change-7b61454311b16ce8.yaml releasenotes/notes/sahara-endpoint-version-discovery-826e9f31093cb10f.yaml releasenotes/notes/some-polish-api-v2-2d2e390a74b088f9.yaml releasenotes/notes/spark-2.2-d7c3a84bd52f735a.yaml releasenotes/notes/spark-2.3-0277fe9feae6668a.yaml releasenotes/notes/storm-1.2-af75fedb413de56a.yaml releasenotes/notes/strict-validation-query-string-a6cadbf2f9c57d06.yaml releasenotes/notes/substring-matching-1d5981b8e5b1d919.yaml releasenotes/notes/support-s3-data-source-a912e2cdf4cd51fb.yaml releasenotes/notes/support-s3-job-binary-6d91267ae11d09d3.yaml releasenotes/notes/transport_url-5bbbf0bb54d81727.yaml releasenotes/notes/trustee-conf-section-5994dcd48a9744d7.yaml releasenotes/notes/updating-plugins-versions-b8d27764178c3cdd.yaml releasenotes/notes/vanilla-2.7.5-support-ffeeb88fc4be34b4.yaml releasenotes/notes/vanilla-2.8.2-support-84c89aad31105584.yaml releasenotes/notes/zookeeper-configuration-steps-48c3d9706c86f227.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/mitaka.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst releasenotes/source/wallaby.rst releasenotes/source/xena.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po releasenotes/source/locale/it/LC_MESSAGES/releasenotes.po roles/build-sahara-images-cli/README.rst roles/build-sahara-images-cli/defaults/main.yaml roles/build-sahara-images-cli/tasks/main.yaml sahara/__init__.py sahara/config.py sahara/context.py sahara/exceptions.py sahara/i18n.py sahara/main.py sahara/version.py sahara.egg-info/PKG-INFO sahara.egg-info/SOURCES.txt sahara.egg-info/dependency_links.txt sahara.egg-info/entry_points.txt sahara.egg-info/not-zip-safe sahara.egg-info/pbr.json sahara.egg-info/requires.txt sahara.egg-info/top_level.txt sahara/api/__init__.py sahara/api/acl.py sahara/api/base.py sahara/api/microversion.py sahara/api/v10.py sahara/api/v11.py sahara/api/middleware/__init__.py sahara/api/middleware/auth_valid.py sahara/api/middleware/sahara_middleware.py sahara/api/middleware/version_discovery.py sahara/api/v2/__init__.py sahara/api/v2/cluster_templates.py sahara/api/v2/clusters.py sahara/api/v2/data_sources.py sahara/api/v2/images.py sahara/api/v2/job_binaries.py sahara/api/v2/job_templates.py sahara/api/v2/job_types.py sahara/api/v2/jobs.py sahara/api/v2/node_group_templates.py sahara/api/v2/plugins.py sahara/cli/__init__.py sahara/cli/sahara_all.py sahara/cli/sahara_api.py sahara/cli/sahara_engine.py sahara/cli/sahara_status.py sahara/cli/sahara_subprocess.py sahara/cli/image_pack/__init__.py sahara/cli/image_pack/api.py sahara/cli/image_pack/cli.py sahara/common/__init__.py sahara/common/config.py sahara/common/policies/__init__.py sahara/common/policies/base.py sahara/common/policies/cluster.py sahara/common/policies/cluster_template.py sahara/common/policies/cluster_templates.py sahara/common/policies/clusters.py sahara/common/policies/data_source.py 
sahara/common/policies/data_sources.py sahara/common/policies/image.py sahara/common/policies/images.py sahara/common/policies/job.py sahara/common/policies/job_binaries.py sahara/common/policies/job_binary.py sahara/common/policies/job_binary_internals.py sahara/common/policies/job_executions.py sahara/common/policies/job_template.py sahara/common/policies/job_type.py sahara/common/policies/job_types.py sahara/common/policies/jobs.py sahara/common/policies/node_group_template.py sahara/common/policies/node_group_templates.py sahara/common/policies/plugin.py sahara/common/policies/plugins.py sahara/conductor/__init__.py sahara/conductor/api.py sahara/conductor/manager.py sahara/conductor/objects.py sahara/conductor/resource.py sahara/db/__init__.py sahara/db/api.py sahara/db/base.py sahara/db/migration/__init__.py sahara/db/migration/alembic.ini sahara/db/migration/cli.py sahara/db/migration/alembic_migrations/README.md sahara/db/migration/alembic_migrations/env.py sahara/db/migration/alembic_migrations/script.py.mako sahara/db/migration/alembic_migrations/versions/001_icehouse.py sahara/db/migration/alembic_migrations/versions/002_placeholder.py sahara/db/migration/alembic_migrations/versions/003_placeholder.py sahara/db/migration/alembic_migrations/versions/004_placeholder.py sahara/db/migration/alembic_migrations/versions/005_placeholder.py sahara/db/migration/alembic_migrations/versions/006_placeholder.py sahara/db/migration/alembic_migrations/versions/007_increase_status_description_size.py sahara/db/migration/alembic_migrations/versions/008_security_groups.py sahara/db/migration/alembic_migrations/versions/009_rollback_info.py sahara/db/migration/alembic_migrations/versions/010_auto_security_groups.py sahara/db/migration/alembic_migrations/versions/011_sahara_info.py sahara/db/migration/alembic_migrations/versions/012_availability_zone.py sahara/db/migration/alembic_migrations/versions/013_volumes_availability_zone.py sahara/db/migration/alembic_migrations/versions/014_add_volume_type.py sahara/db/migration/alembic_migrations/versions/015_add_events_objects.py sahara/db/migration/alembic_migrations/versions/016_is_proxy_gateway.py sahara/db/migration/alembic_migrations/versions/017_drop_progress.py sahara/db/migration/alembic_migrations/versions/018_volume_local_to_instance.py sahara/db/migration/alembic_migrations/versions/019_is_default_for_templates.py sahara/db/migration/alembic_migrations/versions/020_remove_redandunt_progress_ops.py sahara/db/migration/alembic_migrations/versions/021_datasource_placeholders.py sahara/db/migration/alembic_migrations/versions/022_add_job_interface.py sahara/db/migration/alembic_migrations/versions/023_add_use_autoconfig.py sahara/db/migration/alembic_migrations/versions/024_manila_shares.py sahara/db/migration/alembic_migrations/versions/025_increase_ip_column_size.py sahara/db/migration/alembic_migrations/versions/026_add_is_public_is_protected.py sahara/db/migration/alembic_migrations/versions/027_rename_oozie_job_id.py sahara/db/migration/alembic_migrations/versions/028_storage_devices_number.py sahara/db/migration/alembic_migrations/versions/029_set_is_protected_on_is_default.py sahara/db/migration/alembic_migrations/versions/030-health-check.py sahara/db/migration/alembic_migrations/versions/031_added_plugins_table.py sahara/db/migration/alembic_migrations/versions/032_add_domain_name.py sahara/db/migration/alembic_migrations/versions/033_add_anti_affinity_ratio_field_to_cluster.py 
sahara/db/migration/alembic_migrations/versions/034_boot_from_volume.py sahara/db/migration/alembic_migrations/versions/035_boot_from_volume_enhancements.py sahara/db/sqlalchemy/__init__.py sahara/db/sqlalchemy/api.py sahara/db/sqlalchemy/model_base.py sahara/db/sqlalchemy/models.py sahara/db/sqlalchemy/types.py sahara/db/templates/README.rst sahara/db/templates/__init__.py sahara/db/templates/api.py sahara/db/templates/cli.py sahara/db/templates/utils.py sahara/locale/de/LC_MESSAGES/sahara.po sahara/plugins/__init__.py sahara/plugins/base.py sahara/plugins/castellan_utils.py sahara/plugins/conductor.py sahara/plugins/context.py sahara/plugins/db.py sahara/plugins/edp.py sahara/plugins/exceptions.py sahara/plugins/health_check_base.py sahara/plugins/images.py sahara/plugins/kerberos.py sahara/plugins/labels.py sahara/plugins/main.py sahara/plugins/objects.py sahara/plugins/opts.py sahara/plugins/provisioning.py sahara/plugins/recommendations_utils.py sahara/plugins/resource.py sahara/plugins/service_api.py sahara/plugins/swift_helper.py sahara/plugins/swift_utils.py sahara/plugins/testutils.py sahara/plugins/topology_helper.py sahara/plugins/utils.py sahara/plugins/default_templates/template.conf sahara/plugins/default_templates/ambari/v2_3/cluster.json sahara/plugins/default_templates/ambari/v2_3/master-edp.json sahara/plugins/default_templates/ambari/v2_3/master.json sahara/plugins/default_templates/ambari/v2_3/worker.json sahara/plugins/default_templates/ambari/v2_4/cluster.json sahara/plugins/default_templates/ambari/v2_4/master-edp.json sahara/plugins/default_templates/ambari/v2_4/master.json sahara/plugins/default_templates/ambari/v2_4/worker.json sahara/plugins/default_templates/ambari/v2_5/cluster.json sahara/plugins/default_templates/ambari/v2_5/master-edp.json sahara/plugins/default_templates/ambari/v2_5/master.json sahara/plugins/default_templates/ambari/v2_5/worker.json sahara/plugins/default_templates/cdh/v5_5_0/cluster.json sahara/plugins/default_templates/cdh/v5_5_0/manager.json sahara/plugins/default_templates/cdh/v5_5_0/master-additional.json sahara/plugins/default_templates/cdh/v5_5_0/master-core.json sahara/plugins/default_templates/cdh/v5_5_0/worker-nm-dn.json sahara/plugins/default_templates/cdh/v5_7_0/cluster.json sahara/plugins/default_templates/cdh/v5_7_0/manager.json sahara/plugins/default_templates/cdh/v5_7_0/master-additional.json sahara/plugins/default_templates/cdh/v5_7_0/master-core.json sahara/plugins/default_templates/cdh/v5_7_0/worker-nm-dn.json sahara/plugins/default_templates/cdh/v5_9_0/cluster.json sahara/plugins/default_templates/cdh/v5_9_0/manager.json sahara/plugins/default_templates/cdh/v5_9_0/master-additional.json sahara/plugins/default_templates/cdh/v5_9_0/master-core.json sahara/plugins/default_templates/cdh/v5_9_0/worker-nm-dn.json sahara/plugins/default_templates/mapr/5_0_0_mrv2/cluster.json sahara/plugins/default_templates/mapr/5_0_0_mrv2/master.json sahara/plugins/default_templates/mapr/5_0_0_mrv2/worker.json sahara/plugins/default_templates/mapr/v5_1_0_mrv2/cluster.json sahara/plugins/default_templates/mapr/v5_1_0_mrv2/master.json sahara/plugins/default_templates/mapr/v5_1_0_mrv2/worker.json sahara/plugins/default_templates/mapr/v5_2_0_mrv2/cluster.json sahara/plugins/default_templates/mapr/v5_2_0_mrv2/master.json sahara/plugins/default_templates/mapr/v5_2_0_mrv2/worker.json sahara/plugins/default_templates/spark/v1_3_1/cluster.json sahara/plugins/default_templates/spark/v1_3_1/master.json 
sahara/plugins/default_templates/spark/v1_3_1/slave.json sahara/plugins/default_templates/spark/v1_6_0/cluster.json sahara/plugins/default_templates/spark/v1_6_0/master.json sahara/plugins/default_templates/spark/v1_6_0/slave.json sahara/plugins/default_templates/spark/v2_1_0/cluster.json sahara/plugins/default_templates/spark/v2_1_0/master.json sahara/plugins/default_templates/spark/v2_1_0/slave.json sahara/plugins/default_templates/storm/v1_0_1/cluster.json sahara/plugins/default_templates/storm/v1_0_1/master.json sahara/plugins/default_templates/storm/v1_0_1/slave.json sahara/plugins/default_templates/storm/v1_1_0/cluster.json sahara/plugins/default_templates/storm/v1_1_0/master.json sahara/plugins/default_templates/storm/v1_1_0/slave.json sahara/plugins/default_templates/vanilla/v2_7_1/cluster.json sahara/plugins/default_templates/vanilla/v2_7_1/master.json sahara/plugins/default_templates/vanilla/v2_7_1/worker.json sahara/plugins/fake/__init__.py sahara/plugins/fake/edp_engine.py sahara/plugins/fake/plugin.py sahara/plugins/resources/create-principal-keytab sahara/plugins/resources/cron-file sahara/plugins/resources/cron-script sahara/plugins/resources/kdc_conf sahara/plugins/resources/kdc_conf_redhat sahara/plugins/resources/krb-client-init.sh.template sahara/plugins/resources/krb5_config sahara/plugins/resources/mit-kdc-server-init.sh.template sahara/service/__init__.py sahara/service/coordinator.py sahara/service/engine.py sahara/service/networks.py sahara/service/ntp_service.py sahara/service/ops.py sahara/service/periodic.py sahara/service/quotas.py sahara/service/sessions.py sahara/service/trusts.py sahara/service/validation.py sahara/service/volumes.py sahara/service/api/__init__.py sahara/service/api/v10.py sahara/service/api/v11.py sahara/service/api/v2/__init__.py sahara/service/api/v2/cluster_templates.py sahara/service/api/v2/clusters.py sahara/service/api/v2/data_sources.py sahara/service/api/v2/images.py sahara/service/api/v2/job_binaries.py sahara/service/api/v2/job_templates.py sahara/service/api/v2/job_types.py sahara/service/api/v2/jobs.py sahara/service/api/v2/node_group_templates.py sahara/service/api/v2/plugins.py sahara/service/castellan/__init__.py sahara/service/castellan/config.py sahara/service/castellan/sahara_key_manager.py sahara/service/castellan/utils.py sahara/service/edp/__init__.py sahara/service/edp/base_engine.py sahara/service/edp/hdfs_helper.py sahara/service/edp/job_manager.py sahara/service/edp/job_utils.py sahara/service/edp/s3_common.py sahara/service/edp/shares.py sahara/service/edp/binary_retrievers/__init__.py sahara/service/edp/binary_retrievers/dispatch.py sahara/service/edp/binary_retrievers/internal_swift.py sahara/service/edp/binary_retrievers/manila_share.py sahara/service/edp/binary_retrievers/s3_storage.py sahara/service/edp/binary_retrievers/sahara_db.py sahara/service/edp/data_sources/__init__.py sahara/service/edp/data_sources/base.py sahara/service/edp/data_sources/manager.py sahara/service/edp/data_sources/opts.py sahara/service/edp/data_sources/hdfs/__init__.py sahara/service/edp/data_sources/hdfs/implementation.py sahara/service/edp/data_sources/manila/__init__.py sahara/service/edp/data_sources/manila/implementation.py sahara/service/edp/data_sources/maprfs/__init__.py sahara/service/edp/data_sources/maprfs/implementation.py sahara/service/edp/data_sources/s3/__init__.py sahara/service/edp/data_sources/s3/implementation.py sahara/service/edp/data_sources/swift/__init__.py 
sahara/service/edp/data_sources/swift/implementation.py sahara/service/edp/job_binaries/__init__.py sahara/service/edp/job_binaries/base.py sahara/service/edp/job_binaries/manager.py sahara/service/edp/job_binaries/opts.py sahara/service/edp/job_binaries/internal_db/__init__.py sahara/service/edp/job_binaries/internal_db/implementation.py sahara/service/edp/job_binaries/manila/__init__.py sahara/service/edp/job_binaries/manila/implementation.py sahara/service/edp/job_binaries/s3/__init__.py sahara/service/edp/job_binaries/s3/implementation.py sahara/service/edp/job_binaries/swift/__init__.py sahara/service/edp/job_binaries/swift/implementation.py sahara/service/edp/oozie/__init__.py sahara/service/edp/oozie/engine.py sahara/service/edp/oozie/oozie.py sahara/service/edp/oozie/workflow_creator/__init__.py sahara/service/edp/oozie/workflow_creator/base_workflow.py sahara/service/edp/oozie/workflow_creator/hive_workflow.py sahara/service/edp/oozie/workflow_creator/java_workflow.py sahara/service/edp/oozie/workflow_creator/mapreduce_workflow.py sahara/service/edp/oozie/workflow_creator/pig_workflow.py sahara/service/edp/oozie/workflow_creator/shell_workflow.py sahara/service/edp/oozie/workflow_creator/workflow_factory.py sahara/service/edp/resources/edp-main-wrapper.jar sahara/service/edp/resources/edp-spark-wrapper.jar sahara/service/edp/resources/hive-default.xml sahara/service/edp/resources/launch_command.py sahara/service/edp/resources/mapred-default.xml sahara/service/edp/resources/mapred-job-config.xml sahara/service/edp/resources/workflow.xml sahara/service/edp/spark/__init__.py sahara/service/edp/spark/engine.py sahara/service/edp/storm/__init__.py sahara/service/edp/storm/engine.py sahara/service/edp/utils/__init__.py sahara/service/edp/utils/shares.py sahara/service/health/__init__.py sahara/service/health/common.py sahara/service/health/verification_base.py sahara/service/heat/__init__.py sahara/service/heat/commons.py sahara/service/heat/heat_engine.py sahara/service/heat/templates.py sahara/service/validations/__init__.py sahara/service/validations/acl.py sahara/service/validations/base.py sahara/service/validations/cluster_template_schema.py sahara/service/validations/cluster_templates.py sahara/service/validations/clusters.py sahara/service/validations/clusters_scaling.py sahara/service/validations/clusters_schema.py sahara/service/validations/images.py sahara/service/validations/node_group_template_schema.py sahara/service/validations/node_group_templates.py sahara/service/validations/plugins.py sahara/service/validations/shares.py sahara/service/validations/edp/__init__.py sahara/service/validations/edp/base.py sahara/service/validations/edp/data_source.py sahara/service/validations/edp/data_source_schema.py sahara/service/validations/edp/job.py sahara/service/validations/edp/job_binary.py sahara/service/validations/edp/job_binary_internal.py sahara/service/validations/edp/job_binary_internal_schema.py sahara/service/validations/edp/job_binary_schema.py sahara/service/validations/edp/job_execution.py sahara/service/validations/edp/job_execution_schema.py sahara/service/validations/edp/job_interface.py sahara/service/validations/edp/job_schema.py sahara/swift/__init__.py sahara/swift/swift_helper.py sahara/swift/utils.py sahara/swift/resources/conf-template.xml sahara/tests/README.rst sahara/tests/__init__.py sahara/tests/unit/__init__.py sahara/tests/unit/base.py sahara/tests/unit/test_context.py sahara/tests/unit/test_exceptions.py sahara/tests/unit/test_main.py 
sahara/tests/unit/testutils.py sahara/tests/unit/api/__init__.py sahara/tests/unit/api/test_acl.py sahara/tests/unit/api/middleware/__init__.py sahara/tests/unit/api/middleware/test_auth_valid.py sahara/tests/unit/cli/__init__.py sahara/tests/unit/cli/test_sahara_cli.py sahara/tests/unit/cli/test_sahara_status.py sahara/tests/unit/cli/image_pack/__init__.py sahara/tests/unit/cli/image_pack/test_image_pack_api.py sahara/tests/unit/conductor/__init__.py sahara/tests/unit/conductor/base.py sahara/tests/unit/conductor/test_api.py sahara/tests/unit/conductor/test_resource.py sahara/tests/unit/conductor/manager/__init__.py sahara/tests/unit/conductor/manager/test_clusters.py sahara/tests/unit/conductor/manager/test_defaults.py sahara/tests/unit/conductor/manager/test_edp.py sahara/tests/unit/conductor/manager/test_edp_interface.py sahara/tests/unit/conductor/manager/test_from_template.py sahara/tests/unit/conductor/manager/test_templates.py sahara/tests/unit/db/__init__.py sahara/tests/unit/db/test_utils.py sahara/tests/unit/db/migration/__init__.py sahara/tests/unit/db/migration/test_db_manage_cli.py sahara/tests/unit/db/migration/test_migrations.py sahara/tests/unit/db/migration/test_migrations_base.py sahara/tests/unit/db/sqlalchemy/__init__.py sahara/tests/unit/db/sqlalchemy/test_types.py sahara/tests/unit/db/templates/__init__.py sahara/tests/unit/db/templates/common.py sahara/tests/unit/db/templates/test_delete.py sahara/tests/unit/db/templates/test_update.py sahara/tests/unit/db/templates/test_utils.py sahara/tests/unit/plugins/__init__.py sahara/tests/unit/plugins/test_base_plugins_support.py sahara/tests/unit/plugins/test_images.py sahara/tests/unit/plugins/test_kerberos.py sahara/tests/unit/plugins/test_labels.py sahara/tests/unit/plugins/test_provide_recommendations.py sahara/tests/unit/plugins/test_provisioning.py sahara/tests/unit/plugins/test_utils.py sahara/tests/unit/resources/dfs_admin_0_nodes.txt sahara/tests/unit/resources/dfs_admin_1_nodes.txt sahara/tests/unit/resources/dfs_admin_3_nodes.txt sahara/tests/unit/resources/test-default.xml sahara/tests/unit/service/__init__.py sahara/tests/unit/service/test_coordinator.py sahara/tests/unit/service/test_engine.py sahara/tests/unit/service/test_networks.py sahara/tests/unit/service/test_ntp_service.py sahara/tests/unit/service/test_ops.py sahara/tests/unit/service/test_periodic.py sahara/tests/unit/service/test_quotas.py sahara/tests/unit/service/test_sessions.py sahara/tests/unit/service/test_trusts.py sahara/tests/unit/service/test_volumes.py sahara/tests/unit/service/api/__init__.py sahara/tests/unit/service/api/test_v10.py sahara/tests/unit/service/api/v2/__init__.py sahara/tests/unit/service/api/v2/base.py sahara/tests/unit/service/api/v2/test_clusters.py sahara/tests/unit/service/api/v2/test_images.py sahara/tests/unit/service/api/v2/test_plugins.py sahara/tests/unit/service/castellan/__init__.py sahara/tests/unit/service/castellan/test_sahara_key_manager.py sahara/tests/unit/service/edp/__init__.py sahara/tests/unit/service/edp/edp_test_utils.py sahara/tests/unit/service/edp/test_hdfs_helper.py sahara/tests/unit/service/edp/test_job_manager.py sahara/tests/unit/service/edp/test_job_possible_configs.py sahara/tests/unit/service/edp/test_job_utils.py sahara/tests/unit/service/edp/test_json_api_examples.py sahara/tests/unit/service/edp/test_s3_common.py sahara/tests/unit/service/edp/binary_retrievers/__init__.py sahara/tests/unit/service/edp/binary_retrievers/test_dispatch.py 
sahara/tests/unit/service/edp/binary_retrievers/test_internal_swift.py sahara/tests/unit/service/edp/binary_retrievers/test_manila.py sahara/tests/unit/service/edp/data_sources/__init__.py sahara/tests/unit/service/edp/data_sources/base_test.py sahara/tests/unit/service/edp/data_sources/data_source_manager_support_test.py sahara/tests/unit/service/edp/data_sources/hdfs/__init__.py sahara/tests/unit/service/edp/data_sources/hdfs/test_hdfs_type.py sahara/tests/unit/service/edp/data_sources/manila/__init__.py sahara/tests/unit/service/edp/data_sources/manila/test_manila_type.py sahara/tests/unit/service/edp/data_sources/maprfs/__init__.py sahara/tests/unit/service/edp/data_sources/maprfs/test_maprfs_type_validation.py sahara/tests/unit/service/edp/data_sources/s3/__init__.py sahara/tests/unit/service/edp/data_sources/s3/test_s3_type.py sahara/tests/unit/service/edp/data_sources/swift/__init__.py sahara/tests/unit/service/edp/data_sources/swift/test_swift_type.py sahara/tests/unit/service/edp/job_binaries/__init__.py sahara/tests/unit/service/edp/job_binaries/job_binary_manager_support.py sahara/tests/unit/service/edp/job_binaries/test_base.py sahara/tests/unit/service/edp/job_binaries/internal_db/__init__.py sahara/tests/unit/service/edp/job_binaries/internal_db/test_internal_db_type.py sahara/tests/unit/service/edp/job_binaries/manila/__init__.py sahara/tests/unit/service/edp/job_binaries/manila/test_manila_type.py sahara/tests/unit/service/edp/job_binaries/s3/__init__.py sahara/tests/unit/service/edp/job_binaries/s3/test_s3_type.py sahara/tests/unit/service/edp/job_binaries/swift/__init__.py sahara/tests/unit/service/edp/job_binaries/swift/test_swift_type.py sahara/tests/unit/service/edp/oozie/__init__.py sahara/tests/unit/service/edp/oozie/test_oozie.py sahara/tests/unit/service/edp/spark/__init__.py sahara/tests/unit/service/edp/spark/base.py sahara/tests/unit/service/edp/storm/__init__.py sahara/tests/unit/service/edp/storm/test_storm.py sahara/tests/unit/service/edp/utils/test_shares.py sahara/tests/unit/service/edp/workflow_creator/__init__.py sahara/tests/unit/service/edp/workflow_creator/test_create_workflow.py sahara/tests/unit/service/health/__init__.py sahara/tests/unit/service/health/test_verification_base.py sahara/tests/unit/service/heat/__init__.py sahara/tests/unit/service/heat/test_templates.py sahara/tests/unit/service/validation/__init__.py sahara/tests/unit/service/validation/test_add_tags_validation.py sahara/tests/unit/service/validation/test_cluster_create_validation.py sahara/tests/unit/service/validation/test_cluster_delete_validation.py sahara/tests/unit/service/validation/test_cluster_scaling_validation.py sahara/tests/unit/service/validation/test_cluster_template_create_validation.py sahara/tests/unit/service/validation/test_cluster_template_update_validation.py sahara/tests/unit/service/validation/test_cluster_update_validation.py sahara/tests/unit/service/validation/test_ng_template_validation_create.py sahara/tests/unit/service/validation/test_ng_template_validation_update.py sahara/tests/unit/service/validation/test_protected_validation.py sahara/tests/unit/service/validation/test_share_validations.py sahara/tests/unit/service/validation/test_validation.py sahara/tests/unit/service/validation/utils.py sahara/tests/unit/service/validation/edp/__init__.py sahara/tests/unit/service/validation/edp/test_data_source.py sahara/tests/unit/service/validation/edp/test_job.py sahara/tests/unit/service/validation/edp/test_job_binary.py 
sahara/tests/unit/service/validation/edp/test_job_binary_internal.py sahara/tests/unit/service/validation/edp/test_job_executor.py sahara/tests/unit/service/validation/edp/test_job_interface.py sahara/tests/unit/swift/__init__.py sahara/tests/unit/swift/test_swift_helper.py sahara/tests/unit/swift/test_utils.py sahara/tests/unit/topology/__init__.py sahara/tests/unit/topology/test_topology.py sahara/tests/unit/utils/__init__.py sahara/tests/unit/utils/test_api.py sahara/tests/unit/utils/test_api_validator.py sahara/tests/unit/utils/test_cinder.py sahara/tests/unit/utils/test_cluster.py sahara/tests/unit/utils/test_cluster_progress_ops.py sahara/tests/unit/utils/test_configs.py sahara/tests/unit/utils/test_crypto.py sahara/tests/unit/utils/test_edp.py sahara/tests/unit/utils/test_general.py sahara/tests/unit/utils/test_hacking.py sahara/tests/unit/utils/test_heat.py sahara/tests/unit/utils/test_neutron.py sahara/tests/unit/utils/test_patches.py sahara/tests/unit/utils/test_poll_utils.py sahara/tests/unit/utils/test_proxy.py sahara/tests/unit/utils/test_resources.py sahara/tests/unit/utils/test_rpc.py sahara/tests/unit/utils/test_ssh_remote.py sahara/tests/unit/utils/test_types.py sahara/tests/unit/utils/test_xml_utils.py sahara/tests/unit/utils/notification/__init__.py sahara/tests/unit/utils/notification/test_sender.py sahara/tests/unit/utils/openstack/__init__.py sahara/tests/unit/utils/openstack/test_base.py sahara/tests/unit/utils/openstack/test_heat.py sahara/tests/unit/utils/openstack/test_images.py sahara/tests/unit/utils/openstack/test_swift.py sahara/topology/__init__.py sahara/topology/topology_helper.py sahara/topology/resources/core-template.xml sahara/topology/resources/mapred-template.xml sahara/utils/__init__.py sahara/utils/api.py sahara/utils/api_validator.py sahara/utils/cluster.py sahara/utils/cluster_progress_ops.py sahara/utils/configs.py sahara/utils/crypto.py sahara/utils/edp.py sahara/utils/files.py sahara/utils/general.py sahara/utils/network.py sahara/utils/patches.py sahara/utils/poll_utils.py sahara/utils/procutils.py sahara/utils/proxy.py sahara/utils/remote.py sahara/utils/resources.py sahara/utils/rpc.py sahara/utils/ssh_remote.py sahara/utils/tempfiles.py sahara/utils/types.py sahara/utils/wsgi.py sahara/utils/xmlutils.py sahara/utils/hacking/__init__.py sahara/utils/hacking/checks.py sahara/utils/hacking/commit_message.py sahara/utils/hacking/logging_checks.py sahara/utils/notification/__init__.py sahara/utils/notification/sender.py sahara/utils/openstack/__init__.py sahara/utils/openstack/base.py sahara/utils/openstack/cinder.py sahara/utils/openstack/glance.py sahara/utils/openstack/heat.py sahara/utils/openstack/images.py sahara/utils/openstack/keystone.py sahara/utils/openstack/manila.py sahara/utils/openstack/neutron.py sahara/utils/openstack/nova.py sahara/utils/openstack/swift.py tools/cover.sh tools/lintstack.py tools/lintstack.sh tools/test-setup.sh tools/config/config-generator.sahara.conf tools/config/sahara-policy-generator.conf tools/gate/build-images././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641486.0 sahara-16.0.0/sahara.egg-info/dependency_links.txt0000664000175000017500000000000100000000000022057 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641486.0 sahara-16.0.0/sahara.egg-info/entry_points.txt0000664000175000017500000000331300000000000021307 0ustar00zuulzuul00000000000000[console_scripts] 
_sahara-subprocess = sahara.cli.sahara_subprocess:main sahara-all = sahara.cli.sahara_all:main sahara-api = sahara.cli.sahara_api:main sahara-db-manage = sahara.db.migration.cli:main sahara-engine = sahara.cli.sahara_engine:main sahara-image-pack = sahara.cli.image_pack.cli:main sahara-rootwrap = oslo_rootwrap.cmd:main sahara-status = sahara.cli.sahara_status:main sahara-templates = sahara.db.templates.cli:main [oslo.config.opts] sahara.config = sahara.config:list_opts [oslo.config.opts.defaults] sahara.config = sahara.common.config:set_config_defaults [oslo.policy.policies] sahara = sahara.common.policies:list_rules [sahara.cluster.plugins] fake = sahara.plugins.fake.plugin:FakePluginProvider [sahara.data_source.types] hdfs = sahara.service.edp.data_sources.hdfs.implementation:HDFSType manila = sahara.service.edp.data_sources.manila.implementation:ManilaType maprfs = sahara.service.edp.data_sources.maprfs.implementation:MapRFSType s3 = sahara.service.edp.data_sources.s3.implementation:S3Type swift = sahara.service.edp.data_sources.swift.implementation:SwiftType [sahara.infrastructure.engine] heat = sahara.service.heat.heat_engine:HeatEngine [sahara.job_binary.types] internal-db = sahara.service.edp.job_binaries.internal_db.implementation:InternalDBType manila = sahara.service.edp.job_binaries.manila.implementation:ManilaType s3 = sahara.service.edp.job_binaries.s3.implementation:S3Type swift = sahara.service.edp.job_binaries.swift.implementation:SwiftType [sahara.remote] ssh = sahara.utils.ssh_remote:SshRemoteDriver [sahara.run.mode] all-in-one = sahara.service.ops:LocalOps distributed = sahara.service.ops:RemoteOps [wsgi_scripts] sahara-wsgi-api = sahara.cli.sahara_api:setup_api ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641486.0 sahara-16.0.0/sahara.egg-info/not-zip-safe0000664000175000017500000000000100000000000020237 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641486.0 sahara-16.0.0/sahara.egg-info/pbr.json0000664000175000017500000000005700000000000017471 0ustar00zuulzuul00000000000000{"git_version": "02235e6c", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641486.0 sahara-16.0.0/sahara.egg-info/requires.txt0000664000175000017500000000155100000000000020413 0ustar00zuulzuul00000000000000Flask>=1.0.2 Jinja2>=2.10 SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 WebOb>=1.7.1 alembic>=0.9.6 botocore>=1.5.1 castellan>=0.16.0 eventlet>=0.26.0 iso8601>=0.1.11 jsonschema>=3.2.0 keystoneauth1>=3.4.0 keystonemiddleware>=4.17.0 microversion-parse>=0.2.1 oslo.concurrency>=3.26.0 oslo.config>=6.8.0 oslo.context>=2.22.0 oslo.db>=6.0.0 oslo.i18n>=3.15.3 oslo.log>=3.36.0 oslo.messaging>=10.2.0 oslo.middleware>=3.31.0 oslo.policy>=3.6.0 oslo.rootwrap>=5.8.0 oslo.serialization!=2.19.1,>=2.18.0 oslo.service>=1.31.0 oslo.upgradecheck>=1.3.0 oslo.utils>=4.5.0 paramiko>=2.7.1 pbr!=2.1.0,>=2.0.0 python-cinderclient!=4.0.0,>=3.3.0 python-glanceclient>=2.8.0 python-heatclient>=1.10.0 python-keystoneclient>=3.8.0 python-manilaclient>=1.16.0 python-neutronclient>=6.7.0 python-novaclient>=9.1.0 python-swiftclient>=3.2.0 requests>=2.23.0 stevedore>=1.20.0 tooz>=1.58.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641486.0 sahara-16.0.0/sahara.egg-info/top_level.txt0000664000175000017500000000000700000000000020540 0ustar00zuulzuul00000000000000sahara 
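The entry point groups declared in entry_points.txt above (console_scripts, sahara.cluster.plugins, sahara.data_source.types, sahara.job_binary.types, and so on) are how the pluggable pieces of Sahara are discovered at runtime; stevedore, which appears in requires.txt, is the usual loader for such groups. The following is a minimal sketch, not code from this tarball, showing how a driver registered under one of these groups could be resolved by name with stevedore; the helper function name is hypothetical, while the namespace and driver names are taken from entry_points.txt.

# Minimal sketch: resolving a driver registered under the
# 'sahara.data_source.types' entry point group with stevedore.
# The helper name below is hypothetical and not part of sahara itself.
from stevedore import driver


def load_data_source_type(name):
    # 'name' is one of the keys declared above, e.g. 'swift', 'hdfs' or 's3'.
    mgr = driver.DriverManager(
        namespace='sahara.data_source.types',
        name=name,
        invoke_on_load=True)  # instantiate the plugin class on load
    return mgr.driver


if __name__ == '__main__':
    # Usage example: obtain the Swift data source implementation by name.
    swift_type = load_data_source_type('swift')
    print(type(swift_type))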
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.793891 sahara-16.0.0/setup.cfg0000664000175000017500000000512700000000000014706 0ustar00zuulzuul00000000000000[metadata] name = sahara summary = Sahara project description_file = README.rst license = Apache Software License python_requires = >=3.6 classifiers = Programming Language :: Python Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux author = OpenStack author_email = openstack-discuss@lists.openstack.org home_page = https://docs.openstack.org/sahara/latest/ [files] packages = sahara data_files = etc/sahara = etc/sahara/api-paste.ini etc/sahara/rootwrap.conf etc/sahara/rootwrap.d = etc/sahara/rootwrap.d/* [entry_points] console_scripts = sahara-all = sahara.cli.sahara_all:main sahara-api = sahara.cli.sahara_api:main sahara-engine = sahara.cli.sahara_engine:main sahara-db-manage = sahara.db.migration.cli:main sahara-rootwrap = oslo_rootwrap.cmd:main _sahara-subprocess = sahara.cli.sahara_subprocess:main sahara-templates = sahara.db.templates.cli:main sahara-image-pack = sahara.cli.image_pack.cli:main sahara-status = sahara.cli.sahara_status:main wsgi_scripts = sahara-wsgi-api = sahara.cli.sahara_api:setup_api sahara.cluster.plugins = fake = sahara.plugins.fake.plugin:FakePluginProvider sahara.data_source.types = hdfs = sahara.service.edp.data_sources.hdfs.implementation:HDFSType manila = sahara.service.edp.data_sources.manila.implementation:ManilaType maprfs = sahara.service.edp.data_sources.maprfs.implementation:MapRFSType swift = sahara.service.edp.data_sources.swift.implementation:SwiftType s3 = sahara.service.edp.data_sources.s3.implementation:S3Type sahara.job_binary.types = internal-db = sahara.service.edp.job_binaries.internal_db.implementation:InternalDBType manila = sahara.service.edp.job_binaries.manila.implementation:ManilaType swift = sahara.service.edp.job_binaries.swift.implementation:SwiftType s3 = sahara.service.edp.job_binaries.s3.implementation:S3Type sahara.infrastructure.engine = heat = sahara.service.heat.heat_engine:HeatEngine sahara.remote = ssh = sahara.utils.ssh_remote:SshRemoteDriver sahara.run.mode = all-in-one = sahara.service.ops:LocalOps distributed = sahara.service.ops:RemoteOps oslo.config.opts = sahara.config = sahara.config:list_opts oslo.config.opts.defaults = sahara.config = sahara.common.config:set_config_defaults oslo.policy.policies = sahara = sahara.common.policies:list_rules [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/setup.py0000664000175000017500000000137600000000000014601 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/test-requirements.txt0000664000175000017500000000123700000000000017324 0ustar00zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. hacking>=3.0.1,<3.1.0 # Apache-2.0 PyMySQL>=0.8.0 # MIT License bandit>=1.1.0,<1.6.0 # Apache-2.0 bashate>=0.5.1 # Apache-2.0 coverage!=4.4,>=4.0 # Apache-2.0 doc8>=0.6.0 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD oslotest>=3.2.0 # Apache-2.0 stestr>=1.0.0 # Apache-2.0 psycopg2>=2.8.0 # LGPL/ZPL pylint==1.4.5 # GPLv2 testresources>=2.0.0 # Apache-2.0/BSD testscenarios>=0.4 # Apache-2.0/BSD testtools>=2.4.0 # MIT python-saharaclient>=1.4.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.789891 sahara-16.0.0/tools/0000775000175000017500000000000000000000000014220 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.789891 sahara-16.0.0/tools/config/0000775000175000017500000000000000000000000015465 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/tools/config/config-generator.sahara.conf0000664000175000017500000000066600000000000023033 0ustar00zuulzuul00000000000000[DEFAULT] wrap_width = 79 namespace = sahara.config namespace = keystonemiddleware.auth_token namespace = oslo.concurrency namespace = oslo.db namespace = oslo.log namespace = oslo.messaging namespace = oslo.middleware.cors namespace = oslo.middleware.healthcheck namespace = oslo.middleware.http_proxy_to_wsgi namespace = oslo.policy namespace = oslo.service.periodic_task namespace = oslo.service.sslutils namespace = oslo.service.wsgi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/tools/config/sahara-policy-generator.conf0000664000175000017500000000011100000000000023045 0ustar00zuulzuul00000000000000[DEFAULT] output_file = etc/sahara/policy.yaml.sample namespace = sahara ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/tools/cover.sh0000775000175000017500000000510100000000000015672 0ustar00zuulzuul00000000000000#!/bin/bash # # Copyright 2015: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
ALLOWED_EXTRA_MISSING=4 show_diff () { head -1 $1 diff -U 0 $1 $2 | sed 1,2d } package_name=${PACKAGE_NAME:-sahara} export PYTHON="coverage run --source ${package_name} --parallel-mode" run_coverage () { find . -type f -name "*.pyc" -delete && coverage erase && \ stestr run "$*" coverage combine } # Stash uncommitted changes, checkout master and save coverage report uncommitted=$(git status --porcelain | grep -v "^??") [[ -n $uncommitted ]] && git stash > /dev/null git checkout HEAD^ baseline_report=$(mktemp -t sahara_coverageXXXXXXX) run_coverage "$*" coverage report > $baseline_report baseline_missing=$(awk '/^TOTAL/ { print $3 }' $baseline_report) # Checkout back and unstash uncommitted changes (if any) git checkout - [[ -n $uncommitted ]] && git stash pop > /dev/null # Generate and save coverage report current_report=$(mktemp -t sahara_coverageXXXXXXX) run_coverage "$*" coverage report > $current_report current_missing=$(awk '/^TOTAL/ { print $3 }' $current_report) coverage html -d cover coverage xml -o cover/coverage.xml # Show coverage details allowed_missing=$((baseline_missing+ALLOWED_EXTRA_MISSING)) echo "Allowed to introduce missing lines : ${ALLOWED_EXTRA_MISSING}" echo "Missing lines in master : ${baseline_missing}" echo "Missing lines in proposed change : ${current_missing}" if [ -z "$current_missing" ]; then echo "No coverage found!" exit_code=1 elif [ $allowed_missing -gt $current_missing ]; then if [ $baseline_missing -lt $current_missing ]; then show_diff $baseline_report $current_report echo "I believe you can cover all your code with 100% coverage!" else echo "Thank you! You are awesome! Keep writing unit tests! :)" fi exit_code=0 else show_diff $baseline_report $current_report echo "Please write more unit tests, we should keep our test coverage :( " exit_code=1 fi rm $baseline_report $current_report exit $exit_code ././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1648641486.789891 sahara-16.0.0/tools/gate/0000775000175000017500000000000000000000000015140 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/tools/gate/build-images0000775000175000017500000000502100000000000017426 0ustar00zuulzuul00000000000000#!/bin/bash -xe # The script fails at the first error PLUGIN=$1 function setup_build_env() { source /etc/os-release || source /usr/lib/os-release if [ "${ID}" = "ubuntu" ]; then # The Ubuntu kernel, for mysterious reasons, can be read only by root. Fix it. # See https://bugs.launchpad.net/ubuntu/+source/linux/+bug/759725 sudo dpkg-statoverride --add --update root root 0644 /boot/vmlinuz-$(uname -r) fi } function get_cloud_image() { # Download the cloud image for the specified distro and version local required_name="$1" local distro_name="$2" local img_url="" case "${distro_name}" in "centos7") cache_name="CentOS-7-x86_64-GenericCloud.qcow2" img_url="http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2" ;; "ubuntu-trusty") # assume trusty for now cache_name="trusty-server-cloudimg-amd64-disk1.img" img_url="https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img" ;; *) ;; esac # download the image to the cache if [ ! 
-f "${cache_name}" ]; then curl -o "${cache_name}" "${img_url}" fi cp -f ${cache_name} ${required_name} } function build_images() { # build all the images for the specified plugin_name # - plugin_name: name of the plugin as required by sahara-image-pack # - plugin_version: version of the plugin # - distributions: list of distributions for the version of the plugin local plugin_name="$1" local plugin_version="$2" local distributions="$3" local image_name="" for distro in ${distributions}; do image_name="${distro}_${plugin_name}_${plugin_version}.qcow2" get_cloud_image "${image_name}" "${distro}" tox -e images -- sahara-image-pack --image "${image_name}" "${plugin_name}" "${plugin_version}" done } setup_build_env # This define the matrix: for each plugin version, add a line like: # build_images "" "" " " case "$PLUGIN" in "cdh") build_images "cdh" "5.9.0" "centos7" build_images "cdh" "5.11.0" "centos7" build_images "cdh" "5.13.0" "centos7" ;; "ambari") build_images "ambari" "2.4" "centos7" ;; "mapr") build_images "mapr" "5.2.0.mrv2" "centos7" ;; "spark") build_images "spark" "2.3" "centos7" ;; *) echo "Invalid version" ;; esac ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/tools/lintstack.py0000775000175000017500000001417300000000000016577 0ustar00zuulzuul00000000000000#!/usr/bin/env python # Copyright (c) 2012, AT&T Labs, Yun Mao # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Stolen from OpenStack Nova """pylint error checking.""" import json import re import sys from pylint import lint from pylint.reporters import text from six.moves import cStringIO as StringIO # Note(maoy): E1103 is error code related to partial type inference ignore_codes = ["E1103"] # Note(maoy): the error message is the pattern of E0202. It should be ignored # for sahara.tests modules ignore_messages = ["An attribute affected in sahara.tests"] KNOWN_PYLINT_EXCEPTIONS_FILE = "tools/pylint_exceptions" class LintOutput(object): _cached_filename = None _cached_content = None def __init__(self, filename, lineno, line_content, code, message, lintoutput): self.filename = filename self.lineno = lineno self.line_content = line_content self.code = code self.message = message self.lintoutput = lintoutput @classmethod def from_line(cls, line): m = re.search(r"(\S+):(\d+): \[(\S+)(, \S*)?] 
(.*)", line) matched = m.groups() filename, lineno, code, message = (matched[0], int(matched[1]), matched[2], matched[-1]) if cls._cached_filename != filename: with open(filename) as f: cls._cached_content = list(f.readlines()) cls._cached_filename = filename line_content = cls._cached_content[lineno - 1].rstrip() return cls(filename, lineno, line_content, code, message, line.rstrip()) @classmethod def from_msg_to_dict(cls, msg): """From the output of pylint msg, to a dict, where each key is a unique error identifier, value is a list of LintOutput """ result = {} for line in msg.splitlines(): if line.startswith('*****'): continue obj = cls.from_line(line) if obj.is_ignored(): continue key = obj.key() if key not in result: result[key] = [] result[key].append(obj) return result def is_ignored(self): if self.code in ignore_codes: return True if any(msg in self.message for msg in ignore_messages): return True return False def key(self): if self.code in ["E1101", "E1103"]: # These two types of errors are like Foo class has no member bar. # We discard the source code so that the error will be ignored # next time another Foo.bar is encountered. return self.message, "" return self.message, self.line_content.strip() def json(self): return json.dumps(self.__dict__) def review_str(self): return ("File %(filename)s\nLine %(lineno)d:%(line_content)s\n" "%(code)s: %(message)s" % self.__dict__) class ErrorKeys(object): @classmethod def print_json(cls, errors, output=sys.stdout): print("# automatically generated by tools/lintstack.py", file=output) for i in sorted(errors.keys()): print(json.dumps(i), file=output) @classmethod def from_file(cls, filename): keys = set() for line in open(filename): if line and line[0] != "#": d = json.loads(line) keys.add(tuple(d)) return keys def run_pylint(): buff = StringIO() reporter = text.ParseableTextReporter(output=buff) args = ["--include-ids=y", "-E", "sahara"] lint.Run(args, reporter=reporter, exit=False) val = buff.getvalue() buff.close() return val # noinspection PyTypeChecker def generate_error_keys(msg=None): print("Generating", KNOWN_PYLINT_EXCEPTIONS_FILE) if msg is None: msg = run_pylint() errors = LintOutput.from_msg_to_dict(msg) with open(KNOWN_PYLINT_EXCEPTIONS_FILE, "w") as f: ErrorKeys.print_json(errors, output=f) def validate(newmsg=None): print("Loading", KNOWN_PYLINT_EXCEPTIONS_FILE) known = ErrorKeys.from_file(KNOWN_PYLINT_EXCEPTIONS_FILE) if newmsg is None: print("Running pylint. Be patient...") newmsg = run_pylint() errors = LintOutput.from_msg_to_dict(newmsg) print("Unique errors reported by pylint: was %d, now %d." % (len(known), len(errors))) passed = True for err_key, err_list in errors.items(): for err in err_list: if err_key not in known: print(err.lintoutput) print() passed = False if passed: print("Congrats! pylint check passed.") redundant = known - set(errors.keys()) if redundant: print("Extra credit: some known pylint exceptions disappeared.") for i in sorted(redundant): print(json.dumps(i)) print("Consider regenerating the exception file if you will.") else: print("Please fix the errors above. 
If you believe they are false" " positives, run 'tools/lintstack.py generate' to overwrite.") sys.exit(1) def usage(): print("""Usage: tools/lintstack.py [generate|validate] To generate pylint_exceptions file: tools/lintstack.py generate To validate the current commit: tools/lintstack.py """) def main(): option = "validate" if len(sys.argv) > 1: option = sys.argv[1] if option == "generate": generate_error_keys() elif option == "validate": validate() else: usage() if __name__ == "__main__": main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/tools/lintstack.sh0000775000175000017500000000424400000000000016557 0ustar00zuulzuul00000000000000#!/usr/bin/env bash # Copyright (c) 2012-2013, AT&T Labs, Yun Mao # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # Stolen from OpenStack Nova # Use lintstack.py to compare pylint errors. # We run pylint twice, once on HEAD, once on the code before the latest # commit for review. set -e TOOLS_DIR=$(cd $(dirname "$0") && pwd) # Get the current branch name. GITHEAD=`git rev-parse --abbrev-ref HEAD` if [[ "$GITHEAD" == "HEAD" ]]; then # In detached head mode, get revision number instead GITHEAD=`git rev-parse HEAD` echo "Currently we are at commit $GITHEAD" else echo "Currently we are at branch $GITHEAD" fi cp -f $TOOLS_DIR/lintstack.py $TOOLS_DIR/lintstack.head.py if git rev-parse HEAD^2 2>/dev/null; then # The HEAD is a Merge commit. Here, the patch to review is # HEAD^2, the master branch is at HEAD^1, and the patch was # written based on HEAD^2~1. PREV_COMMIT=`git rev-parse HEAD^2~1` git checkout HEAD~1 # The git merge is necessary for reviews with a series of patches. # If not, this is a no-op so won't hurt either. git merge $PREV_COMMIT else # The HEAD is not a merge commit. This won't happen on gerrit. # Most likely you are running against your own patch locally. # We assume the patch to examine is HEAD, and we compare it against # HEAD~1 git checkout HEAD~1 fi # First generate tools/pylint_exceptions from HEAD~1 $TOOLS_DIR/lintstack.head.py generate # Then use that as a reference to compare against HEAD git checkout $GITHEAD $TOOLS_DIR/lintstack.head.py echo "Check passed. FYI: the pylint exceptions are:" cat $TOOLS_DIR/pylint_exceptions ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/tools/test-setup.sh0000775000175000017500000000373600000000000016705 0ustar00zuulzuul00000000000000#!/bin/bash -xe # This script will be run by OpenStack CI before unit tests are run, # it sets up the test system as needed. # Developers should setup their test systems in a similar way. # This setup needs to be run as a user that can run sudo. # The root password for the MySQL database; pass it in via # MYSQL_ROOT_PW. DB_ROOT_PW=${MYSQL_ROOT_PW:-insecure_slave} # This user and its password are used by the tests, if you change it, # your tests might fail. 
DB_USER=openstack_citest DB_PW=openstack_citest sudo -H mysqladmin -u root password $DB_ROOT_PW # It's best practice to remove anonymous users from the database. If # a anonymous user exists, then it matches first for connections and # other connections from that host will not work. sudo -H mysql -u root -p$DB_ROOT_PW -h localhost -e " DELETE FROM mysql.user WHERE User=''; FLUSH PRIVILEGES; CREATE USER '$DB_USER'@'%' IDENTIFIED BY '$DB_PW'; GRANT ALL PRIVILEGES ON *.* TO '$DB_USER'@'%' WITH GRANT OPTION;" # Now create our database. mysql -u $DB_USER -p$DB_PW -h 127.0.0.1 -e " SET default_storage_engine=MYISAM; DROP DATABASE IF EXISTS openstack_citest; CREATE DATABASE openstack_citest CHARACTER SET utf8;" # Same for PostgreSQL # The root password for the PostgreSQL database; pass it in via # POSTGRES_ROOT_PW. DB_ROOT_PW=${POSTGRES_ROOT_PW:-insecure_slave} # Setup user root_roles=$(sudo -H -u postgres psql -t -c " SELECT 'HERE' from pg_roles where rolname='$DB_USER'") if [[ ${root_roles} == *HERE ]];then sudo -H -u postgres psql -c "ALTER ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" else sudo -H -u postgres psql -c "CREATE ROLE $DB_USER WITH SUPERUSER LOGIN PASSWORD '$DB_PW'" fi # Store password for tests cat << EOF > $HOME/.pgpass *:*:*:$DB_USER:$DB_PW EOF chmod 0600 $HOME/.pgpass # Now create our database psql -h 127.0.0.1 -U $DB_USER -d template1 -c "DROP DATABASE IF EXISTS openstack_citest" createdb -h 127.0.0.1 -U $DB_USER -l C -T template0 -E utf8 openstack_citest ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1648641450.0 sahara-16.0.0/tox.ini0000664000175000017500000001273400000000000014402 0ustar00zuulzuul00000000000000[tox] envlist = py38,pep8,genpolicy minversion = 3.14.4 skipsdist = True # this allows tox to infer the base python from the environment name # and override any basepython configured in this file ignore_basepython_conflict = true [testenv] basepython = python3 usedevelop = True install_command = pip install {opts} {packages} setenv = VIRTUAL_ENV={envdir} DISCOVER_DIRECTORY=sahara/tests/unit deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/yoga} -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands = stestr run {posargs} passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY [testenv:cover] setenv = PACKAGE_NAME=sahara commands = {toxinidir}/tools/cover.sh {posargs} [testenv:debug-py36] basepython = python3.6 commands = oslo_debug_helper -t sahara/tests/unit {posargs} [testenv:debug-py37] basepython = python3.7 commands = oslo_debug_helper -t sahara/tests/unit {posargs} [testenv:pep8] deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/yoga} -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt -r{toxinidir}/doc/requirements.txt commands = flake8 {posargs} doc8 doc/source # Run bashate checks bash -c "find devstack -not -name \*.template -and -not -name README.rst -and -not -name \*.json -type f -print0 | xargs -0 bashate -v" # Run security linter bandit -c bandit.yaml -r sahara -n5 -p sahara_default -x tests [testenv:genpolicy] commands = oslopolicy-sample-generator --config-file tools/config/sahara-policy-generator.conf [testenv:venv] commands = {posargs} [testenv:images] sitepackages = True commands = {posargs} [testenv:docs] deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/yoga} -r{toxinidir}/requirements.txt -r{toxinidir}/doc/requirements.txt commands = 
rm -rf doc/html doc/build rm -rf api-ref/build api-ref/html rm -rf doc/source/apidoc doc/source/api sphinx-build -W -b html doc/source doc/build/html sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html whereto doc/source/_extra/.htaccess doc/test/redirect-tests.txt whitelist_externals = rm [testenv:api-ref] deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/yoga} -r{toxinidir}/doc/requirements.txt install_command = pip install -U --force-reinstall {opts} {packages} commands = rm -rf api-ref/build api-ref/html sphinx-build -W -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html whitelist_externals = rm [testenv:pylint] setenv = VIRTUAL_ENV={envdir} commands = bash tools/lintstack.sh [testenv:genconfig] commands = oslo-config-generator --config-file tools/config/config-generator.sahara.conf \ --output-file etc/sahara/sahara.conf.sample [testenv:releasenotes] deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/yoga} -r{toxinidir}/doc/requirements.txt commands = rm -rf releasenotes/build releasenotes/html sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html whitelist_externals = rm [testenv:debug] # It runs tests from the specified dir (default is sahara/tests) # in interactive mode, so, you could use pbr for tests debug. # Example usage: tox -e debug -- -t sahara/tests/unit some.test.path # https://docs.openstack.org/oslotest/latest/features.html#debugging-with-oslo-debug-helper commands = oslo_debug_helper -t sahara/tests/unit {posargs} [testenv:bandit] deps = -r{toxinidir}/test-requirements.txt commands = bandit -c bandit.yaml -r sahara -n5 -p sahara_default -x tests [flake8] show-source = true builtins = _ exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,tools # [H904] Delay string interpolations at logging calls # [H106] Don't put vim configuration in source files # [H203] Use assertIs(Not)None to check for None. # [H204] Use assert(Not)Equal to check for equality # [H205] Use assert(Greater|Less)(Equal) for comparison enable-extensions=H904,H106,H203,H204,H205 # [E123] Closing bracket does not match indentation of opening bracket's line # [E226] Missing whitespace around arithmetic operator # [E402] Module level import not at top of file # [E731] Do not assign a lambda expression, use a def # [W503] Line break occurred before a binary operator # [W504] Line break occurred after a binary operator # [W605] Invalid escape sequence 'x' ignore=E123,E226,E402,E731,W503,W504,W605 [hacking] import_exceptions = sahara.i18n [flake8:local-plugins] extension = S361 = checks:import_db_only_in_conductor S362 = checks:hacking_no_author_attr S363 = checks:check_oslo_namespace_imports S364 = commit_message:OnceGitCheckCommitTitleBug S365 = commit_message:OnceGitCheckCommitTitleLength S368 = checks:dict_constructor_with_list_copy S373 = logging_checks:no_translate_logs S374 = logging_checks:accepted_log_levels S375 = checks:use_jsonutils S360 = checks:no_mutable_default_args paths = ./sahara/utils/hacking [testenv:bindep] # Do not install any requirements. We want this to be fast and work even if # system dependencies are missing, since it's used to tell you what system # dependencies are missing! This also means that bindep must be installed # separately, outside of the requirements files. 
deps = bindep commands = bindep test [testenv:lower-constraints] deps = -c{toxinidir}/lower-constraints.txt -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt
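The [flake8:local-plugins] section of tox.ini above maps project-specific check codes (S360 through S375) to functions under sahara/utils/hacking. As an illustration only, and not the implementation shipped in sahara/utils/hacking/checks.py, a hacking-style check registered that way is simply a generator over logical lines that yields a column offset and a message, roughly like the sketch below; the regular expression and message text are assumptions.

# Illustrative sketch of a local flake8/hacking check of the kind wired up
# in [flake8:local-plugins]; this is not the real sahara implementation.
import re

_MUTABLE_DEFAULT_RE = re.compile(r"^\s*def .+\((.*=\{\}|.*=\[\]).*\):")


def no_mutable_default_args(logical_line):
    """S360: do not use mutable objects as default function arguments."""
    # flake8 calls the check once per logical line; yielding a tuple of
    # (column offset, message) reports a violation at that position.
    if _MUTABLE_DEFAULT_RE.match(logical_line):
        yield (0, "S360: mutable default argument detected")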