panko-10.0.0/.coveragerc
========================
[run]
branch = True
source = panko
omit = panko/tests/*

[report]
ignore_errors = True

panko-10.0.0/.mailmap
=====================
# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>
Adam Gandelman Alan Pevec Alexei Kornienko ChangBo Guo(gcb) Chang Bo Guo
Chinmaya Bharadwaj chinmay Clark Boylan Doug Hellmann Fei Long Wang
Fengqian Gao Fengqian Fengqian Gao Fengqian.Gao Gordon Chung gordon chung
Gordon Chung Gordon Chung Gordon Chung gordon chung Ildiko Vancsa Ildiko
John H. Tran John Tran Julien Danjou LiuSheng liu-sheng Mehdi Abaakouk
Nejc Saje Nejc Saje Nicolas Barcet (nijaba) Pádraig Brady Rich Bowen
Sandy Walsh Sascha Peilicke Sean Dague Shengjie Min shengjie-min
Shuangtai Tian shuangtai Swann Croiset ZhiQiang Fan

panko-10.0.0/.stestr.conf
=========================
[DEFAULT]
test_path=${OS_TEST_PATH:-./panko/tests}
top_dir=./
group_regex=(gabbi\.(suitemaker|driver)\.test_gabbi_(?:prefix_|)[^_]+)_

panko-10.0.0/.zuul.yaml
=======================
- project:
    templates:
      - openstack-python3-wallaby-jobs
      - publish-openstack-docs-pti
      - release-notes-jobs-python3
    check:
      jobs:
        - telemetry-dsvm-integration:
            irrelevant-files: &base-irrelevant-files
              - ^(test-|)requirements.txt$
              - ^.*\.rst$
              - ^.git.*$
              - ^doc/.*$
              - ^panko/hacking/.*$
              - ^panko/locale/.*$
              - ^panko/tests/.*$
              - ^releasenotes/.*$
              - ^setup.cfg$
              - ^tools/.*$
              - ^tox.ini$
        - telemetry-dsvm-integration-ipv6-only:
            irrelevant-files: *base-irrelevant-files
        # TripleO jobs that deploy Telemetry.
        # Note we don't use a project-template here, so it's easier
        # to disable voting on one specific job if things go wrong.
        # tripleo-ci-centos-7-scenario00(1|2)-multinode-oooq will only
        # run on stable/pike while the -container will run in Queens
        # and beyond.
        # If you need any support to debug these jobs in case of
        # failures, please reach us on #tripleo IRC channel.
        - tripleo-ci-centos-7-scenario001-multinode-oooq:
            voting: false
        - tripleo-ci-centos-7-scenario001-standalone:
            voting: false
        - tripleo-ci-centos-7-scenario002-multinode-oooq:
            voting: false
        - tripleo-ci-centos-7-scenario002-standalone:
            voting: false
    gate:
      jobs:
        - telemetry-dsvm-integration:
            irrelevant-files: *base-irrelevant-files
        - telemetry-dsvm-integration-ipv6-only:
            irrelevant-files: *base-irrelevant-files

panko-10.0.0/AUTHORS
====================
Abhishek Chanda Abhishek Lekshmanan Abhishek Lekshmanan Adelina Tuvenie Ajaya
Agrawal Akhil Hingane Ala Rezmerita Alessandro Pilotti Alex Holden Alexei
Kornienko Amy Fong Ana Malagon Ananya Chatterjee Andreas Jaeger Andreas Jaeger
Andrew Hutchings Andrew Melton Angus Lees Angus Salkeld Ann Kamyshnikova
Artur Svechnikov Ashwin Agate Balazs Gibizer Bartosz Górski Ben Nemec Ben
Nemec Boris Bobrov Boris Pavlovic Brad Pokorny Brant Knudson Brian Cline
Brian Moss Brooklyn Chen Béla Vancsics Can ZHANG Cao Xuan Hoang Cedric Soulas
Chad Lung Chandan Kumar Chandan Kumar ChangBo Guo(gcb) Chaozhe.Chen ChenZheng
Chinmaya Bharadwaj Chmouel Boudjnah Chris Dent Chris Dent Christian Berendt
Christian Martinez Christian Schwede Chuck Short Clark Boylan Claudiu Belu
Corey Bryant Cyril Roelandt Cyril Roelandt Damian Van Vuuren Dan Florea Dan
Prince Dan Travis Dao Cong Tien Darren Birkett Davanum Srinivas David Peraza
David Rabel Dazhao Debo~ Dutta DeepaJon Dina Belova Dirk Mueller Divya Dong Ma
Doug Hellmann Drew Thorstensen Duong Ha-Quang Edwin Zhai Emilien Macchi Emma
Foley Endre Karlson Eoghan Glynn Eoghan Glynn Eric Brown Eyal Fabio Giannetti
Fei Long Wang Feng Xi Yan Fengqian Gao Flavio Percoco François Charlier
François Rossigneux Frederic FAURE Gangyi Luo Gauvain Pocentek Gerard Garcia
Ghanshyam Mann Gordon Chung Graham Binns Guangyu Suo Hang Liu Hangdong Zhang
Hanxi Hanxi Liu Hanxi_Liu Haomeng, Wang Harri Hämäläinen Hervé Beraud Hisashi
Osanai Hoang Trung Hieu Hongbin Lu Ian Wienand Igor Degtiarov Ihar Hrachyshka
Ildiko Vancsa Ilya Sviridov Ilya Tyaptin Ionuț Arțăriși Ivan Kolodyazhny Jake
Liu James E. Blair James E. Blair Jason Myers Jason Zhang Jay Lau Jay Pipes
Jeremy Liu Jeremy Stanley Jie Li Jim Rollenhagen Jimmy McCrory Joanna H.
Huang Joe Gordon Joe H. Rahme John H. Tran John Herndon JordanP JuPing Julien
Danjou Justin SB KIYOHIRO ADACHI Kamil Rykowski Keith Byrne Ken Pepple
Ken'ichi Ohmichi Ken'ichi Ohmichi Kennan Kennan Kevin McDonald Kevin_Zheng
Kirill Bespalov Kishore Juigil Koert van der Veer Komei Shimamura Ladislav
Smola Lan Qi song Lance Bragstad Lena Novokshonova Lianhao Lu LinuxJedi
LiuSheng Luis A. Garcia
Luis Pigueiras Luo Gangyi Luong Anh Tuan Maho Koshiya Marios Andreou Mark
McClain Mark McLoughlin Martin Geisler Martin Kletzander Mathew Odden Mathieu
Gagné Matt Riedemann Matthias Runge Mehdi Abaakouk Mehdi Abaakouk Michael
Krotscheck Michael Still Michał Jastrzębski Miguel Alex Cantu Miguel Grinberg
Mike Spreitzer Ming Shuang Xian Monsyne Dragon Monty Taylor Morgan Fainberg
Nadya Privalova Nadya Shakhat Nejc Saje Nguyen Van Trung Nick Barcet Nicolas
Barcet (nijaba) Noorul Islam K M Octavian Ciuhandu OpenStack Release Bot
PanFengyun PanFengyun Patrick East Paul Belanger Pavlo Shchelokovskyy Peter
Portante Phil Neal Piyush Masrani Pradeep Kilambi Pradeep Kilambi Pradeep
Kumar Singh Pradyumna Sampath Pádraig Brady Qiaowei Ren Rabi Mishra Rafael
Folco Rafael Rivero Rich Bowen Rikimaru Honjo Rob Raymond Robert Collins
Robert Mizielski Rohit Jaiswal Romain Soufflet Roman Bogorodskiy Rosario Di
Somma Ruslan Aliev Russell Bryant Ryan Petrello Ryota MIBU Saba Ahmed Sam
Morrison Samta Samuel Merritt Sandy Walsh Sanja Nosan Sascha Peilicke Sean
Dague Sean McGinnis Sergey Lukjanov Sergey Vilgelm Shane Wang Shengjie Min
Shilla Saebi Shuangtai Tian Shubham Chitranshi Simona Iuliana Toader Sofer
Athlan-Guyot Srinivas Sakhamuri Stas Maksimov Stefano Zilli Stephen Balukoff
Stephen Gran Steve Lewis Steve Martinelli Steven Berler Sumant Murke Surya
Prabhakar Svetlana Shturm Swami Reddy Swann Croiset Swapnil Kulkarni
(coolsvap) Sylvain Afchain Takashi NATSUME Tatsuro Makita Terri Yu Thierry
Carrez Thomas Bechtold Thomas Goirand Thomas Herve Thomas Herve Thomas Maddox
Tong Li Tony Breeds Tovin Seven Trinh Nguyen Ubuntu Victor Stinner Victor
Stinner Vieri <15050873171@163.com> Vitalii Lebedynskyi Vitaly Gridnev
Vladislav Kuzmin Vu Cong Tuan Wenzhi Yu Wu Wenxiang Xia Linjuan XiaBing Yao
XiaojueGuan Yaguang Tang Yanyan Hu Yassine Lamgarchal Yathiraj Udupi You
Yamagata Yuanbin.Chen Yunhong, Jiang Yuriy Zveryanskyy Zhi Kun Liu Zhi Yan
Liu ZhiQiang Fan ZhongShengping Zhongyue Luo Zi Lian Ji aggaatul ananya23d
annegentle ansaba astacksu ccrouch chenxing csatari eNovance emilienm florent
fujioka yuuichi gecong1973 gengchc2 gengjh ghanshyam ghanshyam ghanshyam gord
chung guillaume pernot hanxi.liu hgangwx jiaxi jinxingfang jizilian joyce
kairoaraujo kiwik-chenrui leizhang lianghuifei lijian lingyongxu lipan liuqing
liusheng liushuobj lizheming loooosy lqslan lrqrun ls1175 lvdongbing lzhijun
maaoyu maniksidana019 melissaml mengalong mizeng nellysmitt replay sanuptpm
sh.huang shangxiaobj shengjie min srsakhamuri tanlin terriyu unknown vagrant
venkatamahesh vivek.nandavanam vivek.nandavanam wangzihao xialinjuan xiangjun
li xiaozhuangqing xingzhou yanheven yuanrunsen zhang-jinnan zhang-shaoman
zhangguoqing zhangyanxian zhulingjie zhurong zjingbj “zhangshengping2012”

panko-10.0.0/CONTRIBUTING.rst
=============================
If you would like to contribute to the development of OpenStack, you must
follow the steps documented at:

   https://docs.openstack.org/infra/manual/developers.html#development-workflow

Once those steps have been completed, changes to OpenStack should be
submitted for review via the Gerrit tool, following the workflow documented
at:

   https://docs.openstack.org/infra/manual/developers.html#development-workflow

Pull requests submitted through GitHub will be ignored.
Bugs should be filed on Launchpad, not GitHub:

   https://bugs.launchpad.net/panko

panko-10.0.0/ChangeLog
======================

CHANGES
=======

10.0.0
------

* Remove six
* Update fixtures
* Revert "Temporarily switch dvsm-integration off"
* Implement secure RBAC for segregation and telemetry policies
* Add a system-reader check string to base.py
* Update requirements for secure RBAC work
* Update the README to point to storyboard
* Remove deprecated tail\_log function
* Temporarily switch dvsm-integration off
* bump py37 to py38 in tox.ini
* Imported Translations from Zanata
* Add Python3 wallaby unit tests
* Update master for stable/victoria
* Remove install unnecessary packages

9.0.0
-----

* Add a /healthcheck by default
* Add max\_count argument for clear\_expired\_data call
* Stop to use the \_\_future\_\_ module
* Switch to newer openstackdocstheme and reno versions
* Add py38 package metadata
* Add Python3 victoria unit tests
* Update master for stable/ussuri

8.0.0
-----

* Add irrelevant-files for tempest jobs
* Use unittest.mock instead of third party mock
* Cleanup doc support
* Cleanup py27 support
* Update hacking for Python3
* Set Tempest's service\_availability setting for panko
* Imported Translations from Zanata
* [ussuri][goal] Drop python 2.7 support and testing
* Fix error config registration of elasticsearch
* Update master for stable/train

7.0.0
-----

* PDF documentation build
* Run 'telemetry-dsvm-integration-ipv6-only' job in gate
* Add Python 3 Train unit tests
* Update json module to jsonutils
* Replace git.openstack.org URLs with opendev.org URLs
* Fix install\_command in tox.ini
* OpenDev Migration Patch
* Dropping the py35 testing
* Integrate OSprofiler in Panko
* Update min tox version to 2.0
* Imported Translations from Zanata
* Move elasticsearch installation script to panko
* Replace tripleo-scenario002-multinode with scenario002-standalone
* Replace telemetry-tox-py37 with openstack-tox-py37 in zuul test
* Replace openstack.org git:// URLs with https://
* Update master for stable/stein

6.0.0
-----

* Fix RST formatting issues in README
* Correcting Panko Port in documentation
* Replace tripleo-scenario001-multinode with scenario001-standalone
* Don't quote {posargs} in tox.ini
* add python 3.6 unit test job
* switch documentation job to new PTI
* import zuul job settings from project-config
* add release notes to readme.rst
* Imported Translations from Zanata
* Reraise last error when getting DB connection

5.0.0
-----

* Add python 3.7 gating
* Switch to stestr
* Don't use deprecated publisher anymore
* Invalid link to static file in doc
* fix tox python3 overrides
* Imported Translations from Zanata
* storage: hide useless logs
* fix error url
* Trivial: Update pypi url to new url
* Revert "Operator for start/end\_timestamp changes to 'eq'"
* Fix doc title format error
* Imported Translations from Zanata
* Update reno for stable/queens
* Fix indentation in Docs

4.0.0
-----

* Zuul: Remove project name
* Imported Translations from Zanata
* Remove use of unsupported TEMPEST\_SERVICES variable
* Imported Translations from Zanata
* Imported Translations from Zanata
* tempest: use new plugin
* change doc panko bug tracker url
* Add README.rst record more project message
* Fix to use .
to source script files * Use native Zuul v3 tox jobs * Update uwsgi.rst doc file format * Add doc8 to pep8 check for panko project * remove paas format * Block keystonemiddleware for temporary * Removed dummy intree panko tempest plugin * Render policy in documentation * Imported Translations from Zanata * Add bindep + Remove setting of version/release from releasenotes * Remove jsonutils usage * zuul: run TripleO jobs with new zuulv3 layout * Move default policies into code * Support admin to get all events * Using --option ARGUMENT * Zuul: add file extension to playbook path * Imported Translations from Zanata * Imported Translations from Zanata * Use generic user for both zuul v2 and v3 * Move legacy jobs to project * pass empty url if missing * Imported Translations from Zanata * Remove deprecated Panko dispatcher * Operator for start/end\_timestamp changes to 'eq' * update the location of gabbits directory * Remove vestigate HUDSON\_PUBLISH\_DOCS reference * Update reno for stable/pike * Fix api's event listing query to sqlalchemy with non-admin user * Fix typos and replace http with https for doc links * sql: support bigint traits 3.0.0 ----- * cleanup panko docs * Update the documentation link for doc migration * Imported Translations from Zanata * Docs: switch to openstackdocstheme * Update Documentation link in README * Update SQLAlchemy requirement * Add sem-ver flag so pbr generates correct version * Devstack install with pankoclient * Fix panko document error * Remove non active maintainers * Add field description * Add url\_prefix parameter in Elasticsearch connection * deprecate hbase * Fix the port for Panko API in devstack * Fix html\_last\_updated\_fmt for Python3 * Stop using gnocchi reference * add noauth api pipeline * storage: Ensure pymysql is prefered * pass only database options * let requirements handle pbr version * support uwsgi * Delete unused local variable apache\_version in devstack * Remove log translations * Added example for stable branch * Add new connection parameters in ES * Imported Translations from Zanata * sqlalchemy: use nested transaction when getting/creating event types * Imported Translations from Zanata * Using fixtures.MockPatch instead of mockpatch.Patch * Removal of config file copy from manual install * don't setup logging or cors when not needed * Fix releasenotes * add ceilometer publisher * storage: get database url in right order * Trivial: remove support for py34 * Remove unused utils method * Update reno for stable/ocata 2.0.0 ----- * Optimize policy engine initialization * Update requirements * modernise gabbi usage * remove event\_\* prefixes * add tempest plugin base * remove event redundancy * remove ceilometer/meter specific hbase code * move integration to ceilometer * move tempest under panko * Enable coverage report in console output * Fix expecting content-type headers * [doc] Note lack of constraints is a choice * Replace all retrying with tenacity in Panko * Fix typo in plugin.sh * Replaces uuid.uuid4 with uuidutils.generate\_uuid() * Translate info-level log messages for LOG.error * Bump hacking to 0.12 * Imported Translations from Zanata * cors: update default configuration using cors' own set\_defaults funtion * Configuration: use stdout instead of stderr * Add http\_proxy\_to\_wsgi to panko config-generator * Remove pecan\_debug option * Add http\_proxy\_to\_wsgi to api-paste * Enable release notes translation * devstack: don't override ceilometer setup * Enable paginate query for event listing * Docstrings should not 
start with a space * doc: remove talk about query/complex queries * Update reno for stable/newton * Base.Model not define \_\_ne\_\_() built-in function * Fix elasticsearch-dsl for ElasticSearch 2 support 1.0.0 ----- * Pass oslo\_config\_project to keystone conf * Clean deprecated "rpc\_backend" in test\_bin * Remove unused logging import * Remove unused import logging and LOG object 1.0.0.0b3 --------- * Add missing %s in print message * Limit Happybase version < 1.0.0 * Put py34 first in the env order of tox * Do not limit elasticsearch to 2.0 * Remove Nadya Privalova from core reviewers 1.0.0.0b1 --------- * Use pbr to build WSGI script \`panko-api' * Fix docstring for dispatcher * Remove wrong comment in devstack plugin * Set Keystone endpoint type to \`event' * Enable Panko in devstack if ceilometer-collector is enabled * Add support for Python 3.5 * Remove functional tox targets * events: fix operator check in event filter * dispatcher: use the right configuration file and object * devstack: prefix \_drop\_database * devstack: fix service name in global check * tox: rename elastic search targets * devstack: do not wait for service * tests: enable functional Gabbi tests on all backends * Stop relying on global CONF object * tests: do not overide Keystone auth\_version * mongodb: switch to retrying, stop using global conf * storage: pass the conf object to drivers * mongodb: update TTL on expirer run * gabbi: rename deprecated \_ to - * tox: build all needed functional targets + fix compat with Gabbi >= 1.22 + fix raw type * Fix release notes build process and pep8 target * Rename database event dispatcher to panko * Remove base class for dispatcher * Rename to Panko * Remove code that is not related to events storage and API * collector: use an intermediate proxy class for event dispatcher * Use "topics" instead of "topic" in Notifier initialization * Imported Translations from Zanata * Copy images\_client from tempest * [dispatcher/gnocchi] add unit for metric * Delete unused last\_dup variable * catch DriverLoadFailure for get\_transport optional * Bump to Nova v2.1 * Fix the py34 jenkins job * tempest: import ImagesClient as ImagesClientV2 * fix some typos in our doc, comments and releasenotes * enable swift pollsters poll data for specific region * tempest\_plugin: drop telemetry decorator * doc: remove database alarm capability * replace deprecated heat command with OSC * Verify message's signature for every dispatcher * remove log in tools/make\_test\_{event\_}data.py * fix CI failure due to oslo.messaging 5.0.0 * remove record\_metering\_data method from collector * tests: replace overtest by pifpaf * fix opts.list\_opts for polling options * update help string for messaging\_urls * Drop timestamping in pollsters * Set the time point polling starts as timestamp of samples * tox: only install hacking in pep8 target * Remove unused pylintrc * devstack: remove useless policy\_file setting * event: verify signature before recording events for all dispatchers * tests: stop services on tests teardown * Fix oslo\_service stop/start mechanism * remove floating\_ip\_get\_all in nova\_client * Drop the executability of http.py * Updated from global requirements * remove deprecated auth type password-ceilometer-legacy * [Trivial] Update Neutron resource status list * [Trivial] Remove CEILOMETER\_API\_LOG\_DIR option for devstack * Update the default log levels * Clean some unused method in ceilometer/keystone\_client.py * remove invalid todo in storage functional test code * return 400 
when invalid aggregation function is specified * Replace logging with oslo\_log * remove deprecated option database\_connection * move EventFilter to event storage namespace * remove MultipleResultsFound and NoResultFound exception * Remove useless file * remove todo for OS\_TEST\_PATH * Improve the docstring for Swift pollsters * add log decorator for neutron\_client public method * add debtcollector to requirements * Remove direct dependency on babel * Refactor floatingip pollster to use discovery * Fix notification listeners usage * notification: Remove eventlet timers * use static timestamps for api samples * refactor DefinitionException classes * collector: Don't use eventlet thread * fix openstack cli command in doc manual * Add release note link * switch to openstack cli instead of keystone cli * Updated from global requirements * libvirt: fix missing python-libvirt issue * Add status in Ceilometer VPN connection sample * document how to enable ceilometer stable branch in devstack * remove python-ceilometerclient from requirements * Imported Translations from Zanata * Updated from global requirements * Imported Translations from Zanata * Ignore the filter\_service\_activity option if gnocchi project not found * Fix Ceilometer tests config options * Updated from global requirements * Fix doc build if git is absent * Replace tempest-lib by os-testr * Add notes on moving to Gnocchi * delete verbose/redundant/deprecated text * replace fnmatch with oslo.utils.fnmatch * add ceilometer to gnocchi configuration notes * Updated from global requirements * Imported Translations from Zanata * remove complex capabilities for meter, resource and statistics * gnocchi: batch measurements * change keystone to openstack cli * re-org existing manually install notes * messaging: remove RequestContextSerializer * Remove unused context object in vpnaas test * Remove unused object from lbaas\_v2 test * Remove unused context object lbaas test * test: remove unused context object in FWaaS tests * Remove unused context objects in Glance tests * Remove unused context object in test * Remove a useless usage of oslo.context in meters API * Remove the deprecated DB2 driver * Update the Administrator Guide links * mongo: remove unused function * Updated from global requirements * drop magnetodb support * Simplify chained comparison * Enhancing Retry logic to Coordination when joining partitioning grp * publisher: clean out context usage * Disable ceilometer-aipmi by default for devstack * Remove useless context object usage * collector: never allow to lose data * 'ceilometer-polling' should fail with no valid pollsters * Imported Translations from Zanata * Fix typos in comments and config strings * Updated from global requirements * abort alarms URLs when Aodh is unavailable * abort alarms URLs when Aodh is unavailable * fix minor typo in test\_generic.py * Imported Translations from Zanata * Add the functional tests for getting events * collector: never allow to lose data * devstack Fix unprocess measure path * Imported Translations from Zanata * Devstack: install coordination backend for compute agent * remove dns and trove from entry\_points * correct docstring in storage module * Imported Translations from Zanata * Remove gabbi tests that check content-location * Add http publisher * remove dns and trove from entry\_points * Imported Translations from Zanata * Imported Translations from Zanata * Update reno for stable/mitaka * Update .gitreview for stable/mitaka * Remove gabbi tests that check 
content-location * Imported Translations from Zanata * remove unused field 'triggers' defined in sample event\_pipeline.yaml * remove SERVICE\_TENANT\_NAME from devstack plugin * clean devstack plugin * add rc1 release notes * Use assertIn and assertNotIn * core status cleanup * tests: remove ceilometer-api bin test cases * gate: add missing sudo * change dns and trove notifications to declarative * Remove en\_GB translations * register the config generator default hook with the right name * Imported Translations from Zanata * Updated from global requirements * tempest: migrate api and scnario tests from tempest * mitaka-3 release notes * Adjust log levels for InstanceShutOffException * Fix event\_type creationg failure due to race condition * Imported Translations from Zanata * Ignoring cpu measurement when instance's state is SHUTOFF * Add validation for polling\_namespaces option * xenapi: support the session when xenserver is slave * Imported Translations from Zanata * gnocchi dispatch: Added new resource type support * remove wrong "#!/usr/bin/env python" header * Fixed corner cases of incorrect use of oslo.config * Updated from global requirements * timedelta plugin for meter definition process * Cast Int64 values to int, float in statistics * Cache getters for the decalarative definitions * [sahara] add events definitions regarding new notifications * Moved CORS middleware configuration into oslo-config-generator * Add the meter example file 'lbaas-v2-meter-definitions.yaml' * Change default policy to allow create\_samples * Enable the Load Balancer v2 events * Remove unused pngmath Sphinx extension * Updated from global requirements * Fix a minor missing parameter issue * close services in test * Add an update interval to compute discovery * Docs: Configure meters/events dispatch separately * Fix the typo in the gnocchiclient exception * Updated from global requirements * Add gnocchi dispatcher opts to config * Change the SERVICE\_TENANT\_NAME to SERVICE\_PROJECT\_NAME * Hyper-V: replaces in-tree hyper-v utils usage with os\_win * Initial seed of hacking * Add /usr/local/{sbin,bin} to rootwrap exec\_dirs * Fix SDR file parsing for Intel Node Manager * Gnocchi: fix ResourcesDefinitionException for py3 * Change LOG.warn to LOG.warning * tests: fix unworking debug output * Adds timestamp option to Aggregation transformer * remove default=None for config options * Replace assertEqual(None, \*) with assertIsNone in tests * Trivial: Cleanup unused conf variables * Enable the Load Balancer v2 for the Ceilometer(Part Two) * Remove unused variable * Enable the Load Balancer v2 for the Ceilometer(Part One) * Fix footnote reference to Aodh in docs * Updated from global requirements * Set None explicitly to filter options * KEYSTONE\_CATALOG\_BACKEND is deprecated * Use overtest to setup functional backends * devstack: Fix Keystone v3 configuration typo * Imported Translations from Zanata * Handle malformed resource definitions gracefully * Update the home page * Skip duplicate meter definitions * set higher batching requirement * use retrying to attempt to rejoin group * network: remove deprecated option name * sample: remove deprecated option name * Fix wrong capitalization * rewriting history * Remove unused pytz requirement * devstack: use password with version discovery * fix tempest path * Updated from global requirements * raise coordination error if not registered * do not configure worker specific items in init * integration-gate: fix publicURL retrieval * rolling upgrades * fix 
locking in ceilometer * enable notification agent partitioning * better support notification coordination * remove useless notification listener helper * Lookup meter definition fields correctly * Enhances get\_meters to return unique meters * Imported Translations from Zanata * Updated from global requirements * Fix ceilometer floatingip pollster * Updated from global requirements * tempest: migrate base class for tests * tempest: add ceilometer tempest plugin * tempest: add telemetry client manager * tempest: migrate conf.py from tempest tree * tempest: copy telemetry client from tempest tree * Fix events rbac * Don't store events with Gnocchi * add additional mitaka-2 release notes * Corrects typo "a other" -> "another" * Updated from global requirements * add release notes for mitaka-2 * devstack: add support for Gnocchi backend * notification: Use oslo.messaging batch listener * Cleanup of Translations * Added CORS support to Ceilometer * Don't set keystonemiddleware cache * Set None explicitly to filter options * Add OSprofiler-specific events definitions * collector: Use oslo.messaging batch listener * Updated from global requirements * Changes aggregator transformer to allow retention\_time w/o size * Replace LOG.warn with LOG.warning * Updated from global requirements * wrong accumulative value of "network.services.lb.incoming.bytes" * Trivial: Remove vim header from source files * Trival: Remove unused logging import * Fix the typos in the source code * gnocchi: fix stack resource type * Misspelling in message * Clean pagination related methods of impl\_mongodb * Fix some typos in the snmp.py * remove local hacking check * [MongoDB] add indexes in event collection * Remove unused code in gnocchi dispatcher * remove unnecessary code * recheck cache after acquired gnocchi\_resource\_lock * collector: remove deprecated RPC code * fix case in function name * Catch the EndpointNotFound in keystoneauth1 than in keystoneclient * Log exception if stevedore fails to load module * Updated from global requirements * Revert "Revert "devstack config for dogpile cache"" * add per resource lock * verify gnocchi connection before processing * [refactor] remove redundant import of options * Added unit test cases for pysnmp 4.3 * Add keystoneauth1 in requirements * gnocchi: fix cache hash logic * gnocchi: use gnocchiclient instead of requests * show queue status on integration test * Updated from global requirements * using a consistent uuid as cache namespace * Duplicate information link for writing agent plugins * Use keystoneauth1 instead of manual setup * Do not mock the memcache interface for auth\_token * oslo.messaging option group/name change for notification topics * Correct the host field of instance metadata * fix the bug that gnocchi dispatcher can't process single sample * Replace stackforge with openstack * MAINTAINERS: remove outdated data * Remove version from setup.cfg * add initial release notes * fix functional gate * messaging: stop using RequestContextSerializer * Fix ceilometer-test-event.py script * Deduplicate the code about snmp meter loading * Updated from global requirements * Revert "devstack config for dogpile cache" * Revert "Workaround requests/urllib connection leaks" * add cpu.delta to gnocchi resources * simplify collector cache * Consistent publisher\_id for polling agent * build metric list on init * re-implement thread safe fnmatch * clean up integration test urls * tools: fix default resource metadata for instance * don't pass ceilometer options to oslo.db 
engine facade * Use str(name) instead of name.prettyPrint() * Reduce code duplication * remove config files when run clean.sh * fix some test case wrongly skipped for mysql backend * Add WebTest to test-requirements.txt * tests: remove testscenario usage for storage drivers * Remove eventlet usage * Remove alarming code * Clarify the doc about multiple notification\_topics usage * Reduced source code by extracting duplicated code * devstack config for dogpile cache * Updated from global requirements * Updated from global requirements * Fix an indent nit of enforce\_limit method * Move the content of ReleaseNotes to README.rst * use common cache * A dogpile cache of gnocchi resources * Updated from global requirements * install database when collector is enabled * Updated from global requirements * Updated from global requirements * add reno for release notes management * Updated from global requirements * Support to get hardware's cpu\_util from snmp * add rohit\_ to MAINTAINERS * gnocchi: set the default archive policy to None * Mv gabbi\_pipeline.yaml into test directories * Factorize yaml loading of declarative stuffs * Factorize field definition of declarative code * Wrong result is returned when call events getting API * tox: use pretty\_tox in most places * Updated from global requirements * avoid unnecessary inner join in get\_resources() for SQL backend * Add sql-expire-samples-only to option list * Updated from global requirements * configure Apache only when ceilometer-api is enabled * Imported Translations from Zanata * avoid using isolation level * unquote resource id to support slash in it * specify runtime environment for scripts * Using oslo-config-generator to instead of generate-config-file.sh * Use gnocchiclient for integration script * Enable signature verification for events * Correct the timestamp type when make test samples data * Updated from global requirements * avoid generate temporary table when query samples * Reject posting sample with direct=true if Gnocchi is enabled * make script under tools directory executable * Updated from global requirements * Added the README.rst in devstack folder * fix tools/make\_test\_event\_data.py * fix image\_ref attr in gnocchi resource * support mysql+pymysql in functional test * Updated from global requirements * Fix snmp pollster to not ignore valid meters * Block oslo.messaging 2.6.1 release * reset policy per test * Remove dependency on sphinxcontrib-docbookrestapi * gnocchi: remove possible ending / in URL * api: simplify root controller * api: simplify Pecan config * remove instance:FLAVOR related code and docs * Do collector setup and storage cleanup for all backends * change collector\_workers to [collector]workers * Enable POST samples API when gnocchi enabled * devstack: fix debug info for Gnocchi * Imported Translations from Zanata * Add Liberty release note link * Fix make\_test\_data.sh * Imported Translations from Zanata * Be explicit when copying files to /etc/ceilometer * Deprecate event trait plugin 'split' * Updated from global requirements * Clean some log messages when polling neutron resources * Simplify the validation of required fields of pipeline source * doc: service enablement not necessary when using Devstack plugin * Skip bad meter definitions instead of erroring out * Remove the unused network\_get\_all method * mark logging.info translation accordingly * logging cleanup * Updated from global requirements * Remove last vestiges of devstack from grenade plugin * Add missing ceilometerclient repo 
location * Imported Translations from Zanata * Fix for resource polling warnings * SQL: Fix event-list with multiple trait query filters * Fix the bug of "Error spelling of a word" * Imported Translations from Zanata * SQL: Fix event-list with multiple trait query filters * Fix a mistake in a test * Configure collector to only record meter or event * Rename list\_events tests to list\_samples tests * fix elasticsearch script reference * Fix the deprecation note in meter.yaml * Fix the deprecation note in meter.yaml * Remove deprecated archive policy map for Gnocchi * Remove enable\_notification.sh * Parametrize table\_prefix\_separator in hbase * Imported Translations from Zanata * fix typo in storage/impl\_sqlalchemy * devstack: install all configuration files from etc/ * dispatcher: remove deprecated CADF code in HTTP * mongodb: remove deprecated replica\_set support * Ensure the test data sample has correct signature * Open Mitaka development * gnocchi: Don't raise NotImplementedError * Add missing meter and exchange opts * Imported Translations from Zanata * Add test to cover history rule change * Workaround requests/urllib connection leaks * integration tests: additional debugging infos * Coordinator handles ToozError when joining group * Don't create neutron client at loadtime * Delete its corresponding history data when deleting an alarm * update event filter test to validate multiple trait args * Fix variable typos * Updated from global requirements * Change ignore-errors to ignore\_errors * Fix reconnecting to libvirt * remove batch processing requirement from arithmetic transformer * Cleanup empty dirs from tests * retain existing listeners on refresh * Override dispatcher option for test\_alarm\_redirect\_keystone * [ceilometer] Update links to Cloud Admin Guide * Adds support for dynamic event pipeline * Updated from global requirements * Imported Translations from Zanata * pollster/api now publish to sample queue * tox: generate config file on test run * tox: Allow to pass some OS\_\* variables * Refactor keystone handling in discovery manager * Use make\_sample\_from\_instance for net-pollster * apply limit constraint on storage base interface * gnocchi: add two new resources * Fixed tox -egenconfig Error * Add declarative meters to developer docs * add delta transfomer support * do not recreate main queue listeners on partitioning * Validate required fields in meter definition * deprecate cadf\_only http dispatcher * Fix the heavy time cost of event-list * Update API Doc to deprecate the alarming part * Deprecate config options of the old alarming functionality * update architecture documentation * Add attribute 'state' to meter metadata when source is polling * doc: update devstack usage * Remove useless base class * Split out image non-meters * Make the gabbi tox target work with modern tox * Avoid 500 errors when duplicating limit queries * Correct test\_list\_meters\_meter\_id to work with py3 * Updated from global requirements * Update event\_definitions for Cinder Image Cache * Update install docs * Use b64encode to replace of encodestring * Prevent ceilometer expirer from causing deadlocks * remove duplicate log exception message * Spelling mistake of comment in api/controllers/v2/query.py * Fix typos in gnocchi.py and converter.py * Updated from global requirements * Updated from global requirements * Add a py34-functional tox target * doc: update notification\_driver * polling: remove deprecated agents * Fix string in limit warning * Typo fixing * missed entrypoint 
for nova\_notifier removal * Imported Translations from Transifex * Fix links in README.rst * integration: Add debugging information * deprecate db2 nosql driver * devstack: add new option to support event-alarm * Sync devstack plugin with devstack:lib/ceilometer * Updated from global requirements * remove old nova\_notifier processing code * restrict admin event access * Migrate the old snmp pollsters to new declarative pollster * Support to load pollsters extensions at runtime * Added snmp declarative hardware pollster * Requeuing event with workload\_partitioning on publish failure * Event filtering for non-admin users * integration: fix typo * gnocchi: cleanup instance resource definition * Updated from global requirements * Adding pradk to MAINTAINERS * Adding liusheng to MAINTAINERS * Add index to metadata\_hash column of resource table * Incorrect Links are updated * Removing unused dependency: discover * Use new location of subunit2html * Change tox default targets for quick use * Fixed identity trust event types * gnocchi: quote the resource\_id in url * fix metadata for compute cpu notifications * support custom metadata * Move profiler meters to yaml * Control Events RBAC from policy.json * Events RBAC needs scoped token * make telemetry sample payloads dictionaries * Fix requeue process on event handling error * allow configurable pipeline partitioning * Keep the instance\_type meta from polling and notification consistent * Add user\_id,project\_id traits to audit events * Change json path's to start with $. for consistency * Add validation tests for arithmetic, string and prefix expressions * Fix description for "Inapt spelling of 'MongoDB'" * Create conf directory during devstack install phase * support custom timestamp * Add cpu meters to yaml * Fix description for "Incorrect spelling of a word" * integration: add some new tests * Fix disable\_non\_metric\_meters referencing * Update tests to reflect WSME 0.8 fixes * remove jsonpath-rw requirement * Do not use system config file for test * gnocchi: move to jsonpath\_rw\_ext * Updated from global requirements * Allow to run debug tox job for functional tests * Use jsonpath\_rw\_ext for meter/event definitions * preload jsonpath\_rw parsers * integration test: adjusts timeout * integration test: failfast * Updated from global requirements * Avoid recording whole instance info in log * Fix dependency for doc build * Mark record\_type in PaaS Event Format doc as optional * full multi-meter support * add flexible grouping key * Corrected test\_fallback\_meter\_path test case * Add hypervisor inspector sanity check * handle list payloads in notifications * xenapi: support the session to "unix://local" * Introduce Guru Meditation Reports into Ceilometer * Use start status of coodinator in tooz * Fixed event requeuing/ack on publisher failure * Implement consuming metrics from Magnum * Avoid from storing samples with empty or not numerical volumes * use union all when building trait query * Fixed spelling error, retreive -> retrieve * Use min and max on IntOpt option types * Update install docs with gnocchi dispatcher info * Make it possible to run postgresql functional job * Revert "Remove version from os\_auth\_url in service\_credentials" * Updated from global requirements * Use oslo\_config PortOpt support * integration: chown ceilometer directory properly * add mandatory limit value to complex query list * add test to validate jsonpath * Remove version from os\_auth\_url in service\_credentials * do not translate debug logs * 
Updated from global requirements * Grenade plugin using devstack plugin for ceilometer * remove alembic requirement * Convert instance, bandwidth and SwiftMiddleware meters * Change and move the workers options to corresponding service section * Drop the downgrade function of migration scripts * start rpc deprecation * support multiple-meter payloads * add poll history to avoid duplicate samples * Add Kilo release note reference * initialise opencontrail client in tests * Make ConnectionRetryTest more reliable * Correct thread handling in TranslationHook * Updated from global requirements * Correctly intialized olso config fixture for TestClientHTTPBasicAuth * Don't start up mongodb for unit test coverage * disable non-metric meter definitions * Cast Int64 values to float * Convert identity, sahara and volume to meters yaml * Enable entry points for new declarative meters * Fix for rgw still throwing errors * group pollsters by interval * Revert "Revert "remove instance: meter"" * api: fix alarm deletion and update * Fixes the kafka publisher * Sync devstack plugin with devstack:lib/ceilometer * integration: use the right user in gate * Imported Translations from Transifex * Initial separating unit and functional tests * Stop using openstack.common from keystoneclient * minimise scope of hmac mocking * Updated from global requirements * gnocchi: retry with a new token on 401 * Fix some gabbi tests * Improve comments in notification.py * mongo: fix last python3 bugs * postgres isolation level produces inconsistent reads * Masks messaging\_urls in logs during debug mode * Corrected unit of snmp based harware disk and memory meters * Provide base method for inspect\_memory\_resident * Fix Python 3 issue in opendaylight client * Fix more tests on Python 3 * Remove the compute inspector choice restriction * [MongoDB] Refactor indexes for meter and resources * tests: add an integration test * Fix WSGI replacement\_start\_response() on Python 3 * gnocchi: reduce the number of patch to gnocchi API * Make the partition coordinator log more readable * Drop out-of-time-sequence rate of change samples * [MongoDB] Use a aggregate pipeline in statistics * Instance Cache in Node Discovery Pollster * Instance Caching * Imported Translations from Transifex * fix gnocchi resources yaml * Import the api opt group in gabbi fixture * Add a batch\_polled\_samples configuration item * Remove redundant comma * storage: deprecates mongodb\_replica\_set option * Improves send\_test\_data tools * Replace isotime() with utcnow() and isoformat() * distributed coordinated notifications * Imported Translations from Transifex * Close and dispose test database setup connections * Updated from global requirements * api: Redirect request to aodh if available * api: return 410 if only Gnocchi is enabled * Fix broken IPMI agent * add mandatory limit value to meter list * add mandatory limit value to resource list * add mandatory limit value to event list * Move gnocchi resources definition in yaml file * Send a notification per sample, do not batch * Handles dns.domain.exists event in Ceilometer * Pollsters now send notifications without doing transforms * Imported Translations from Transifex * Switch to the oslo\_utils.fileutils * Updated from global requirements * Use choices for hypervisor\_inspector option * The product name Vsphere should be vSphere * Add necessary executable permission * Store and restore the xtrace option in devstack plugin * gnocchi: Remove useless resources patching * add Trove(DBaaS) events * Set 
conf.gnocchi\_dispatcher.url explicitly in tests * Declarative meters support * Stop the tests if backend hasn't started * Delay the start of the collector until after apache restart * Clean the re-implemented serializers in Ceilometer * monkey\_patch thread in tests * make notifier default event publisher * Fix gnocchi DispatcherTest tests * Sort metric data before grouping and processing * Namespace functions in devstack plugin * Added valid values of operator to response body * gnocchi: fixes the instance flavor type * gnocchi dispatcher: fix typo in stevedore endpoint * Imported Translations from Transifex * Tolerate alarm actions set to None * Make ceilometer work correctly when hosted with a SCRIPT\_NAME * Implementation of dynamically reloadable pipeline * fix log msg typo in api utils * Updated from global requirements * Add documentation about the usage of api-no-pipline * drop deprecated pipeline * Improve doc strings after changing method for index creation * set default limit to meter/sample queries * collector: fix test raising error * Remove test-requirements-py3.txt * remove unused event query * Create a devstack plugin for ceilometer * Add support for posting samples to notification-agent via API * restore long uuid data type * Revert "Add support for posting samples to notification-agent via API" * Update alarm history only if change in alarm property * test error log - catch dummy error * fix kafka tests from flooding logs * catch warnings from error tests * remove unused notifier * Add support for posting samples to notification-agent via API * Stop dropping deprecated tables while upgrade in mongodb and db2 * Add handler of sample creation notification * Remove the unused get\_targets method of plugin base * Replaces methods deprecated in pymongo3.0 * add oslo.service options * Restricts pipeline to have unique source names * drop use of oslo.db private attribute * Fix oslo.service configuration options building * Add fileutils to openstack-common.conf * disable non-metric meters * Remove unnecessary executable permission * Imported Translations from Transifex * Switch to oslo.service * Remove unnecessary wrapping of transformer ExtentionManager * Port test\_complex\_query to Python 3 * Fix expected error message on Python 3 * Fix usage of iterator/list on Python 3 * Replaces ensure\_index for create\_index * pip has its own download cache by default * For sake of future python3 encode FakeMemcache hashes * Make acl\_scenarios tests' keystonemiddleware cache work flexibly * Update version for Liberty * Gnocchi Dispatcher support in Ceilometer * Updated from global requirements * Fix alarm rest notifier logging to include severity * Remove useless execute bit on rst file * Fix unicode/bytes issues in API v2 tests * Fix script name in tox.ini for Elasticsearch * Fix the meter unit types to be consistent * tests: use policy\_file in group oslo\_policy * Fix publisher test\_udp on Python 3 * Fix Ceph object store tests on Python 3 * Port IPMI to Python 3 * Port middleware to Python 3 * [elasticsearch] default trait type to string * Updated from global requirements * Lower down the range for columns which are being used as uuid * Sync with latest oslo-incubator * Fix testing of agent manager with tooz * Remove deprecated Swift middleware * add DNS events * Handle database failures on api startup * Fix more tests on Python 3 * Switch to using pbr's autodoc capability * Remove old oslo.messaging aliases * Remove useless versioninfo and clean ceilometer.conf git exclusion * 
Register oslo\_log options before using them * Add running functional scripts for defined backend * Remove snapshot.update events as they are not sent * WSME version >=0.7 correctly returns a 405 * TraitText value restricted to max length 255 * Cause gabbi to skip on no storage sooner * Updated from global requirements * Move eventlet using commands into own directory * adjust alarm post ut code to adapt to upstream wsme * Disable rgw pollster when aws module not found * Fixes DiskInfoPollster AttributeError exception * remove useless log message * use oslo.log instead of oslo-incubator code * Port test\_inspector to Python 3 * Fix usage of dictionary methods on Python 3 * Imported Translations from Transifex * Add oslo.vmware to Python 3 test dependencies * Optionally create trust for alarm actions * Remove iso8601 dependency * Enable test\_swift\_middleware on Python 3 * Enable more tests on Python 3 * Skip hbase tests on Python 3 * Clear useless exclude from flake8 ignore in tox * Remove pagination code * Stop importing print\_function * Remove useless release script in tools * Remove useless dependency on posix\_ipc * Remove exceute bit on HTTP dispatcher * Remove oslo.messaging compat from Havana * Fixing event types pattern for Role Noti. handler * Mask database.event\_connection details in logs * Switch from MySQL-python to PyMySQL * Python 3: replace long with int * Python 3: Replace unicode with six.text\_type * Python 3: generalize the usage of the six module * Update Python 3 requirements * Python 3: set \_\_bool\_\_() method on Namespace * Python 3: encode to UTF-8 when needed * Python 3: sort tables by their full name * Python 3: replace sys.maxint with sys.maxsize * Initial commit for functional tests * Update a test to properly anticipate HTTP 405 for RestController * proposal to add Chris Dent to Ceilometer core * rebuild event model only for database writes * cleanup problem events logic in event db storage * fix incorrect docstring for dispatcher * Imported Translations from Transifex * api: record severity change in alarm history * VMware: verify vCenter server certificate * Add hardware memory buffer and cache metrics * Make interval optional in pipeline * Improve ceilometer-api install documentation * empty non-string values are returned as string traits * Trait\_\* models have incorrect type for key * small change to development.rst file * Drop use of 'oslo' namespace package * [unittests] Increase agent module unittests coverage * stop mocking os.path in test\_setup\_events\_default\_config * Remove py33 tox target * made change to mod\_wsgi.rst file * ensure collections created on upgrade * Fix raise error when run "tox -egenconfig" * Updated from global requirements * Fix None TypeError in neutron process notifications * Have eventlet monkeypatch the time module * Have eventlet monkeypatch the time module * Add the function of deleting alarm history * Updated from global requirements * Fix valueerror when ceilometer-api start * Override gnocchi\_url configuration in test * Move ceilometer/cli.py to ceilometer/cmd/sample.py * Fix valueerror when ceilometer-api start * remove deprecated partitioned alarm service * use message id to generate hbase unique key * gnocchi: fix typo in the aggregation endpoint * Release Import of Translations from Transifex * Fix Copyright date in docs * Replace 'metrics' with 'meters' in option and doc * use message id to generate hbase unique key * update .gitreview for stable/kilo * gnocchi: fix typo in the aggregation endpoint * 
broadcast data to relevant queues only * Imported Translations from Transifex * fix combination alarm with operator == 'or' * Updated from global requirements * proposal to add ZhiQiang Fan to Ceilometer core * Open Liberty development * Fix a samples xfail test that now succeeds * Cosmetic changes for system architecture docs * Fix a issue for kafka-publisher and refactor the test code * pymongo 3.0 breaks ci gate * use oslo.messaging dispatch filter * Further mock adjustments to deal with intermittent failure * Adds support for default rule in ceilometer policy.json * Updated from global requirements * limit alarm actions * Use oslo\_vmware instead of deprecated oslo.vmware * Remove 'samples:groupby' from the Capabilities list * Use old name of 'hardware.ipmi.node.temperature' * Revert "remove instance: meter" * Tweak authenticate event definition * Add project and domain ID to event definition for identity CRUD * Fix the event type for trusts * reset croniter to avoid cur time shift * Imported Translations from Transifex * Avoid a error when py27 and py-mysql tests run in sequence * Stop using PYTHONHASHSEED=0 in ceilometer tests * remove instance: meter * Added ipv6 support for udp publisher * Remove the unnecessary dependency to netaddr * Optimize the flow of getting pollster resources * support ability to skip message signing * Avoid conflict with existing gnocchi\_url conf value * Using oslo.db retry decorator for sample create * alarm: Use new gnocchi aggregation API * collector: enable the service to listen on IPv6 * minimise the use of hmac * Typo in pylintrc * Ceilometer retrieve all images by 'all-tenants' * fix incorrect key check in swift notifications * support disabling profiler and http meters * ensure collections created on upgrade * Fix common misspellings * Updated from global requirements * refuse to post sample which is not supported * Enable collector to requeue samples when enabled * drop deprecated novaclient.v1\_1 * exclude precise metaquery in query field * Imported Translations from Transifex * remove log message when process notification * Add gabbi tests for resources * Fix typos and format in docstrings in http dispatcher * add ability to dispatch events to http target * doc: fix class name * add ability to publish to multiple topics * make field and value attributes mandatory in API Query * Fix db2 upgrade in multi-thread run issue * Add memory.resident libvirt meter for Ceilometer * Update reference * Check the namespaces duplication for ceilometer-polling * Add gabbi tests to explore the Meter and MetersControllers * Imported Translations from Transifex * mysql doesn't understand intersect * order traits returned within events * add network, kv-store, and http events * Add support for additional identity events * Add a Kafka publisher as a Ceilometer publisher * Fix response POST /v2/meters/(meter\_name) to 201 status * Attempt to set user\_id for identity events * Switch to oslo.policy 0.3.0 * normalise timestamp in query * Add more power and thermal data * Updated from global requirements * Fix formatting error in licence * Added option to allow sample expiration more frequently * add option to store raw notification * use mongodb distinct * remove event\_types ordering assumption * Add gabbi tests to cover the SamplesController * api: fix alarm creation if time\_constraint is null * fix log message format in event.storage.impl\_sqlalchemy * Remove duplications from docco * Tidy up clean-samples.yaml * Fix a few typos in the docs * use default trait type 
in event list query * fix wrong string format in libvirt inspector * create a developer section and refactor * Do not default pecan\_debug to CONF.debug * Adding Gabbi Tests to Events API * fix config opts in objectstore.rgw * Updated from global requirements * support time to live on event database for sql backend * add an option to disable non-metric meters * add missing objectstore entry points * Initial gabbi testing for alarms * reorganise architecture page * Add ceph object storage meters * Use oslo\_config choices support * fix inline multiple assignment * alarming: add gnocchi alarm rules * Protect agent startup from import errors in plugins * Revert "Add ceph object storage meters" * api: move alarm rules into they directory * compress events notes * Destroy fixture database after each gabbi TestSuite * Fix unittests for supporting py-pgsql env * Adding links API and CLI query examples * correct column types in events * Be explicit about using /tmp for temporary datafiles * Patch for fixing hardware.memory.used metric * Add ceph object storage meters * [PostgreSQL] Fix regexp operator * Add clean\_exit for py-pgsql unit tests * modify events sql schema to reduce empty columns * Remove duplicated resource when pollster polling * check metering\_connection attribute by default * unicode error in event converter * cleanup measurements page * api: add missing combination\_rule field in sample * Fix test case of self-disabled pollster * update event architecture diagram * use configured max\_retries and retry\_interval for database connection * Updated from global requirements * Making utilization the default spelling * Add Disk Meters for ceilometer * correctly leave group when process is stopped * Updated from global requirements * enable oslo namespace check for ceilometer project * Add doc for version list API * Enabling self-disabled pollster * Use werkzeug to run the developement API server * Imported Translations from Transifex * switch to oslo\_serialization * move non-essential libs to test-requirements * Validate default values in config * fix the value of query\_spec.maxSample to advoid to be zero * clean up to use common service code * Add more sql test scenarios * [SQLalchemy] Add regex to complex queries * Fix duplication in sinks names * metering data ttl sql backend breaks resource metadata * Refactor unit test code for disk pollsters * start recording error notifications * Remove no\_resource hack for IPMI pollster * Add local node resource for IPMI pollsters * Use stevedore to load alarm rules api * [MongoDB] Add regex to complex queries * Imported Translations from Transifex * support time to live on event database for MongoDB * split api.controllers.v2 * add elasticsearch events db * use debug value for pecan\_debug default * Shuffle agents to send request * Updated from global requirements * Adds disk iops metrics implementation in Hyper-V Inspector * discovery: allow to discover all endpoints * Declarative HTTP testing for the Ceilometer API * add listener to pick up notification from ceilometermiddleware * Drop deprecated namespace for oslo.rootwrap * remove empty module tests.collector * Add disk latency metrics implementation in Hyper-V Inspector * add event listener to collector * add notifier publisher for events * enable event pipeline * Imported Translations from Transifex * deprecate swift middleware * sync oslo and bring in versionutils * Expose alarm severity in Alarm Model * Hyper-V: Adds memory metrics implementation * Remove mox from requirements * 
Fix IPMI unit test to cover different platforms * adjust import group order in db2 ut code * add event pipeline * remove unexistent module from doc/source/conf.py * Upgrade to hacking 0.10 * Remove the Nova notifier * Remove argparse from requirements * [MongoDB] Improves get\_meter\_statistics method * Fix docs repeating measuring units * [DB2 nosql] Create TIMESTAMP type index for 'timestamp' field * remove pytidylib and netifaces from tox.ini external dependency * Avoid unnecessary API dependency on tooz & ceilometerclient * Correct name of "ipmi" options group * Fix Opencontrail pollster according the API changes * enable tests.storage.test\_impl\_mongodb * Remove lockfile from requirements * Disable eventlet monkey-patching of DNS * Expose vm's metadata to metrics * Adding build folders & sorting gitignore * Disable proxy in unit test case of test\_bin * Add Event and Trait API to document * Refactor ipmi agent manager * Use alarm's evaluation periods in sufficient test * Use oslo\_config instead of deprecated oslo.config * Avoid executing ipmitool in IPMI unit test * Updated from global requirements * Add a direct to database publisher * Fixed MagnetoDB metrics title * Imported Translations from Transifex * Fix incorrect test case name in test\_net.py * Updated from global requirements * notification agent missing CONF option * switch to oslo\_i18n * Use right function to create extension list for agent test * Imported Translations from Transifex * Add an exchange for Zaqar in profiler notification plugin * Remove unused pecan configuration options * Updated from global requirements * Use oslo\_utils instead of deprecated oslo.utils * Match the meter names for network services * stop using private timeutils attribute * Update measurement docs for network services * Catch exception when evaluate single alarm * Return a meaningful value or raise an excpetion for libvirt * Imported Translations from Transifex * make transformers optional in pipeline * Added metering for magnetodb * Add release notes URL for Juno * Fix release notes URL for Icehouse * remove unnecessary str method when log messages * Revert "Remove Sphinx from py33 requirements" * untie pipeline manager from samples * reset listeners on agent refresh * Remove inspect\_instances method from virt * Optimize resource list query * Synchronize Python 3 requirements * Remove unnecessary import\_opt|group * Add test data generator via oslo messaging * Check to skip to poll and publish when no resource * Add oslo.concurrency module to tox --env genconfig * add glance events * add cinder events * Manual update from global requirements * Add cmd.polling.CLI\_OPTS to option list * Ignore ceilometer.conf * Switch to oslo.context library * Revert "Skip to poll and publish when no resources found" * Added missing measurements and corrected errors in doc * Remove Sphinx from py33 requirements * Clean up bin directory * Improve tools/make\_test\_data.sh correctness * ensure unique pipeline names * implement notification coordination * Make methods static where possible (except openstack.common) * Fix docs to suit merged compute/central agents concept * Drop anyjson * Move central agent code to the polling agent module * RBAC Support for Ceilometer API Implementation * [SQLalchemy] Add groupby ability resource\_metadata * Improve links in config docs * Make LBaaS total\_connections cumulative * remove useless looping in pipeline * Encompassing one source pollsters with common context * Modify tests to support ordering of wsme types * 
Make compute discovery pollster-based, not agent-level * Add docs about volume/snapshot measurements * Port to graduated library oslo.i18n * Retry to connect database when DB2 or mongodb is restarted * Updated from global requirements * Standardize timestamp fields of ceilometer API * Workflow documentation is now in infra-manual * Add alarm\_name field to alarm notification * Updated from global requirements * Rely on VM UUID to fetch metrics in libvirt * Imported Translations from Transifex * Initializing a longer resource id in DB2 nosql backend * Sync oslo-incubator code to latest * ensure unique list of consumers created * fix import oslo.concurrency issue * Add some rally scenarios * Do not print snmpd password in logs * Miniscule typo in metering\_connection help string * add http dispatcher * [MongoDB] Add groupby ability on resource\_metadata * [MongoDB] Fix bug with 'bad' chars in metadatas keys * Override retry\_interval in MongoAutoReconnectTest * Exclude tools/lintstack.head.py for pep8 check * Add encoding of rows and qualifiers in impl\_hbase * Database.max\_retries only override on sqlalchemy side * Support to capture network services notifications * Internal error with period overflow * Remove Python 2.6 classifier * Enable pep8 on ./tools directory * Imported Translations from Transifex * Fixes Hyper-V Inspector disk metrics cache issue * fix swift middleware parsing * Fix order of arguments in assertEqual * Updated from global requirements * Adapting pylint runner to the new message format * Validate AdvEnum & return an InvalidInput on error * add sahara and heat events * add keystone events to definitions * Add timeout to all http requests * [MongoDB] Refactor time to live feature * transform samples only when transformers exist * Updated from global requirements * Remove module not really used by Ceilometer * Switch to oslo.concurrency * Skip to poll and publish when no resources found * Change event type for identity trust notifications * Add mysql and postgresql in tox for debug env * Add new notifications types for volumes/snapshots * Add encoding to keys in compute\_signature * Tests for system and network aggregate pollsters * Add bandwidth to measurements * Fix wrong example of capabilities * Correct the mongodb\_replica\_set option's description * Alarms listing based on "timestamp" * Use 'pg\_ctl' utility to start and stop database * Correct alarm timestamp field in unittest code * Refactor kwapi unit test * Remove duplicated config doc * VMware: Enable VMware inspector to support any port * Clean event method difinition in meter storage base * Fix some nits or typos found by chance * Add Sample ReST API path in webapi document * Enable filter alarms by their type * Fix storage.hbase.util.prepare\_key() for 32-bits system * Add event storage for test\_hbase\_table\_utils * Add per device rate metrics for instances * Fix hacking rule H305 imports not grouped correctly * Add \_\_repr\_\_ method for sample.Sample * remove ordereddict requirement * Improve manual.rst file * Imported Translations from Transifex * Fix columns migrating for PostgreSQL * Updated from global requirements * Updated from global requirements * [MongoDB] Fix bug with reconnection to new master node * Updated from global requirements * support request-id * Update coverage job to references correct file * remove reference to model in migration * Use oslo\_debug\_helper and remove our own version * Allow collector service database connection retry * refresh ceilometer architecture 
documentation * Edits assert methods * Adds memory stats meter to libvirt inspector * Edits assert methods * Edits assert methods * Edits assert methods * Edits assert method * Imported Translations from Transifex * Imported Translations from Transifex * Updated from global requirements * add script to generate test event data * Handle poorly formed individual sensor readings * refactor hbase storage code * Avoid clobbering existing class definition * Hoist duplicated AlarmService initialization to super * Clarify deprecation comment to be accurate * Work toward Python 3.4 support and testing * Fix recording failure for system pollster * sync and clean up oslo * Add missing notification options to the documentation * Add missing alarm options to the documentation * Add oslo.db to config generator * Add missed control exchange options to the documentation * Add coordination related options to the documentation * Add missing collector options to the documentation * switch to oslo-config-generator * Edit docs for docs.opentack.org/developer/ * Add oslo.db to config generator * Fix signature validation failure when using qpid message queue * clean capabilities * move db2 and mongo driver to event tree * move sql event driver to event tree * move hbase event driver to event tree * Sets default encoding for PostgreSQL testing * update database dispatcher to use events db * Add role assignment notifications for identity * add mailmap to avoid dup of authors * Add user\_metadata to network samples * Fix recording failure for system pollster * Manually updated translations * Updated from global requirements * Creates one database per sql test * Adds pylint check for critical error in new patches * Fix neutron client to catch 404 exceptions * Fix OrderedDict usage for Python 2.6 * Include a 'node' key and value in ipmi metadata * clean path in swift middleware * Implement redesigned separator in names of columns in HBase * [HBase] Add migration script for new row separate design * Imported Translations from Transifex * Include a 'node' key and value in ipmi metadata * Updated from global requirements * Run unit tests against PostgreSQL * create skeleton files for event storage backends * Imported Translations from Transifex * isolate event storage models * Fix neutron client to catch 404 exceptions * Run unit tests against MySQL * Updated from global requirements * Correct JSON-based query examples in documentation * Open Kilo development * Add cfg.CONF.import\_group for service\_credentials * Fix OrderedDict usage for Python 2.6 * clean path in swift middleware * Partition static resources defined in pipeline.yaml * Per-source separation of static resources & discovery * dbsync: Acknowledge 'metering\_connection' option * Fix bug in the documentation * Use oslo.msg retry API in rpc publisher * Describe API versions * Change compute agent recurring logs from INFO to DEBUG * Fix bug with wrong bool opt value interpolation * [HBase] Improves speed of unit tests on real HBase backend * Imported Translations from Transifex * Removed unused abc meta class * update references to auth\_token middleware * clean up swift middleware to avoid unicode errors * [HBase] Catch AlreadyExists error in Connection upgrade * Use None instead of mutables in method params default values * Updated from global requirements * Enable to get service types from configuration file * test db2 driver code * Docs: Add description of pipeline discovery section * Typo "possibilites" should be "possibilities" * Modified docs to 
update DevStack's config filename * Add an API configuration section to docs * Tune up mod\_wsgi settings in example configuration * Allow pecan debug middleware to be turned off * Provide \_\_repr\_\_ for SampleFilter * Eliminate unnecessary search for test cases * Switch to a custom NotImplementedError * minimise ceilometer memory usage * Partition swift pollster resources by tenant * Add IPMI pollster * Add IPMI support * Stop using intersphinx * Use central agent manager's keystone token in discoveries * Handle invalid JSON filters from the input gracefully * Sync jsonutils for namedtuple\_as\_object fix * ceilometer spamming syslog * Timestamp bounds need not be tight (per ceilometer 1288372) * Allow to pass dict from resource discovery * fix network discovery meters * switch to sqlalchemy core * Imported Translations from Transifex * Improve the timestamp validation of ceilometer API * Update docs with Sahara notifications configuration * Migrate the rest of the central agent pollsters to use discoveries * Add documentation for implemented identity meters * Fix tests with testtools>=0.9.39 * Document the standard for PaaS service notifications * Returns 401 when unauthorized project access occurs * Adding another set of hardware metrics * normalise resource data * warn against sorting requirements * Add validate alarm\_actions schema in alarm API * Fix help strings * Imported Translations from Transifex * Switch partitioned alarm evaluation to a hash-based approach * Central agent work-load partitioning * collector: Allows to requeue a sample * Typo fixed * Switch to oslo.serialization * Document pipeline publishers configuration * Alarm: Use stevedore to load the service class * Enhance compute diskio tests to handle multi instance * Adding comparison operators in query for event traits * XenAPI support: Update measurements documentation * update requirements * add documentation for setting up api pipeline * Permit usage of notifications for metering * XenAPI support: Disk rates * XenAPI support: Changes for networking metrics * XenAPI support: Memory Usage * XenAPI support: Changes for cpu\_util * XenAPI support: List the instances * Rebase hardware pollsters to use new inspector interface * Switch to use oslo.db * Remove oslo middleware * Adding quotas on alarms * Add an exchange for Trove in profiler notification plugin * Simplify chained comparisons * In-code comments should start with \`#\`, not with \`"""\` * Remove redundant parentheses * skip polls if service is not registered * re-add hashseed to avoid gate error * Switch to oslo.utils * Switch to oslotest * Handle sqlalchemy connection strings with drivers * Rewrite list creation as a list literal * Rewrite dictionary creation as a dictionary literal * Triple double-quoted strings should be used for docstrings * Add upgrading alarm storage in dbsync * Improving of configuration.rst * Fix typos in transformer docstrings * Update tox.ini pep8 config to ignore i18n functions * Added new hardware inspector interface * compute: fix wrong test assertion * sync olso-incubator code * VMware: Support secret host\_password option * refactor filter code in sql backend * Support for per disk volume measurements * Use a FakeRequest object to test middleware * Imported Translations from Transifex * Improve api\_paste\_config file searching * [Hbase] Add column for source filter in \_get\_meter\_samples * Issue one SQL statement per execute() call * Allow tests to run outside tox * [HBase] Refactor hbase.utils * Set page size when Glance 
API request is called * Adding init into tools folder * Enhancing the make\_test\_data script * correct DB2 installation supported features documentation * Avoid duplication of discovery for multi-sink sources * Improve performance of libvirt inspector requests * Documented Stevedore usage and source details * Add notifications for identity authenticate events * Add message translate module in vmware inspector * Handle Cinder attach and detach notifications * [HBase] Improve uniqueness for row in meter table * Doc enhancement for API service deployment with mod\_wsgi * Update documentation for new transformer * Add the arithmetic transformer endpoint to setup.cfg * Imported Translations from Transifex * Fix unit for vpn connection metric * Debug env for tox * Change spelling mistakes * Use auth\_token from keystonemiddleware * Fix dict and set order related issues in tests * Fix listener for update.start notifications * Sahara integration with Ceilometer * Add notifications for identity CRUD events * Extracting make\_resource\_metadata method * Fix make\_test\_data tools script * Add cumulative and gauge to aggregator transformer * Enable some tests against py33 * Remove --tmpdir from mktemp * Replace dict.iteritems() with six.iteritems(dict) * Replace iterator.next() with next(iterator) * Fix aggregator flush method * Automatic discovery of TripleO Overcloud hardware * Set python hash seed to 0 in tox.ini * Don't override the original notification message * Remove ConnectionProxy temporary class * Move sqlalchemy alarms driver code to alarm tree * basestring replaced with six.string\_types * Correct misspelled words * Add retry function for alarm REST notifier * Move hbase alarms driver code to alarm tree * Update measurement docs for FWaaS * Update measurement docs for VPNaaS * Follow up fixes to network services pollsters * Updated from global requirements * Implement consuming ipmi notifications from Ironic * Support for metering FWaaS * Adds Content-Type to alarm REST notifier * Multi meter arithmetic transformer * Remove redudent space in doc string * Use None instead of mutables in test method params defaults * Add support for metering VPNaaS * Use resource discovery for Network Services * Change of get\_events and get\_traits method in MongoDB and Hbase * Fix two out-dated links in doc * Move log alarms driver code to alarm tree * Separate the console scripts * clean up event model * improve expirer performance for sql backend * Move mongodb/db2 alarms driver code to alarm tree * Allow to have different DB for alarm and metering * Replace datetime of time\_constraints by aware object * Sync oslo log module and its dependencies * Use hmac.compare\_digest to compare signature * Add testcase for multiple discovery-driven sources * Fixes aggregator transformer timestamp and user input handling * Improves pipeline transformer documentation * Fix incorrect use of timestamp in test * Add keystone control exchange * Fix call to meter-list in measurements doc * Remove redundant parentheses * [Mongodb] Implement events on Mongodb and DB2 * Fix typos in code comments & docstrings * Make the error message of alarm-not-found clear * Fix SQL exception getting statitics with metaquery * Remove docutils pin * update default\_log\_levels set by ceilometer * Fix annoying typo in partition coordinator test * Transform sample\_cnt type to int * Remove useless sources.json * Fix H405 violations and re-enable gating * Fix H904 violations and re-enable gating * Fix H307 violations and re-enable gating * 
Fix the section name in CONTRIBUTING.rst * Added osprofiler notifications plugin * Improve a bit performance of Ceilometer * Revert "Align to openstack python package index mirror" * Fix aggregator \_get\_unique\_key method * Remove meter hardware.network.bandwidth.bytes * Fix F402 violations and re-enable gating * Fix E265 violations and re-enable gating * Fix E251 violations and re-enable gating * Fix E128 violations and re-enable gating * Fix E126,H104 violations and re-enable gating * Bump hacking to 0.9.x * Fixed various import issues exposed by unittest * use urlparse from six * clean up sample index * Fix HBase available capabilities list * Updated from global requirements * VMware:Update the ceilometer doc with VMware opts * Handle non-ascii character in meter name * Add log output of "x-openstack-request-id" from nova * Imported Translations from Transifex * fix StringIO errors in unit test * Fix hacking rule 302 and enable it * Imported Translations from Transifex * sync oslo code * Fixes ceilometer-compute service start failure * Reenables the testr per test timeout * Avoid reading real config files in unit test * Clean up oslo.middleware.{audit,notifier} * Use hacking from test-requirements * Splits hbase storage code base * Splits mongo storage code base * Separate alarm storage models from other models * Iterates swift response earlier to get the correct status * Fix messaging.get\_transport caching * Fix method mocked in a test * Don't keep a single global TRANSPORT object * Clean up .gitignore * Fix Sphinx directive name in session.py * Fix list of modules not included in auto-gen docs * Downgrade publisher logging to debug level again * remove default=None for config options * [HBase] get\_resource optimization * Fix incorrect trait initialization * Remove unused logging in tests * Revert "Fix the floatingip pollster" * Remove low-value logging from publication codepath * Fix LBaaS connection meter docs * Fix the meter type for LB Bytes * Adding alarm list filtering by state and meter * Adds caches for image and flavor in compute agent * [HBase] Implement events on HBase * Skipping central agent pollster when keystone not available * Respect $TMPDIR environment variable to run tests * Fixed unit test TestRealNotification * Update Measurement Docs for LBaaS * Metering LoadBalancer as a Service * Removes per test testr timeout * Change pipeline\_manager to instance attribute in hooks * Change using of limit argument in get\_sample * Refactor tests to remove direct access to test DBManagers * Fix notification for NotImplemented record\_events * Add missing explicit cfg option import * Fix ceilometer.alarm.notifier.trust import * Use TYPE\_GAUGE rather than TYPE\_CUMULATIVE * Update doc for sample config file issue * Corrects a flaw in the treatment of swift endpoints * use LOG instead of logger as name for the Logger object * Fix doc gate job false success * Improve performance of api requests with hbase scan * Add new 'storage': {'production\_ready': True} capability * Clean tox.ini * Remove (c) and remove unnecessary encoding lines * Fix testing gate due to new keystoneclient release * Ignore the generated file ceilometer.conf.sample * Update the copyright date in doc * Updated from global requirements * reconnect to mongodb on connection failure * refactor sql backend to improve write speed * Don't rely on oslomsg configuration options * replaced unicode() with six.text\_type() * Synced jsonutils from oslo-incubator * Fix the floatingip pollster * Fix project authorization 
check * Update testrepository configuration * Implemented metering for Cinder's snapshots * Use joins instead of subqueries for metadata filtering * Use None instead of mutables in method params defaults * Remove all mostly untranslated PO files * switch SplitResult to use six * Remove unused db code due to api v1 drop * Updated from global requirements * oslo.messaging context must be a dict * Drop deprecated api v1 * Fix network notifications of neutron bulk creation * mongo: remove \_id in inserted alarm changes * Clean up openstack-common.conf * Revert "oslo.messaging context must be a dict" * Correct class when stopping partitioned alarm eval svc * oslo.messaging context must be a dict * Corrections of spelling, rephrasing for clarity * Adapt failing tests for latest wsme version * Removed StorageEngine class and it's hierarchy * Correcting formatting and adding period in measurement doc * Initialize dispatcher manager in event endpoint * Replaced CONF object with url in storage engine creation * Synced jsonutils from oslo-incubator * Remove gettextutils.\_ imports where they are not used * Remove "# noqa" leftovers for gettextutils.\_ * transformer: Add aggregator transformer * Remove conversion debug message * Fix the return of statistic with getting no sample * Remove eventlet.sleep(0) in collector tests * Don't allow queries with 'IN' predicate with an empty sequence * Check if samples returned by get\_sample\_data are not None * Opencontrail network statistics driver * Add a alarm notification using trusts * Replace hard coded WSGI application creation * Describe storage backends in the collector installation guide * Made get\_capabilities a classmethod instead of object method * Disable reverse dns lookup * Consume notif. from multiple message bus * Use NotificationPlugin as an oslo.msg endpoint * Improve combination rule validation * Remove ceilometer.conf.sample * Use known protocol scheme in keystone tests * cleanup virt pollster code * Add encoding argument to deserialising udp packets in collector * Made get\_engine method module-private * Make entities (Resource, User, Project) able to store lists * Remove duplicate alarm from alarm\_ids * More accurate meter name and unit for host load averages * Replace oslo.rpc by oslo.messaging * Fix a response header bug in the error middleware * Remove unnecessary escape character in string format * Optimize checks to set image properties in metadata * fix statistics query in postgres * Removed useless code from \_\_init\_\_ method * Refactored fake connection URL classes * Replace assert statements with assert methods * Removes direct access of timeutils.override\_time * Disable specifying alarm itself in combination rule * Include instance state in metadata * Allowed nested resource metadata in POST'd samples * Sync oslo-incubator code * Updated from global requirements * Refactor the DB implementation of Capabilities API * Fix Jenkins translation jobs * Align to openstack python package index mirror * User a more accurate max\_delay for reconnects * Open Juno development * Imported Translations from Transifex * Add note on aggregate duplication to API docco * Use ConectionPool instead of one Connection in HBase * remove dump tables from previous migrations * De-dupe selectable aggregate list in statistics API * ensure dispatcher service is configured before rpc * improve performance of resource-list in sql * SSL errors thrown with Postgres on multi workers * Remove escape character in string format * Verify user/project ID for 
alarm created by non-admin user * enable a single worker by default * Fix ceilometer.conf.sample mismatch * Metadata in compute.instance.exists fix * Fix order of arguments in assertEquals * Documenting hypervisor support for nova meters * Ensure idempotency of cardinality reduction in mongo * VMware vSphere: Improve the accuracy of queried samples * Use swob instead of webob in swift unit tests * Disable oslo.messaging debug logs * Fix validation error for invalid field name in simple query * fix create\_or\_update logic to avoid rollbacks * Avoid swallowing AssertionError in test skipping logic * Fix hardware pollster to inspect multiple resources * spawn multiple workers in services * Install global lazy \_() * Fixes Hyper-V metrics units * Ensure intended indices on project\_id are created for mongo * Fix the type of the disk IO rate measurements * Change the sample\_type from tuple to string * Fix order of arguments in assertEquals * Ensure alarm rule conform to alarm type * insecure flag added to novaclient * Fixes duplicated names in alarm time constraints * Use the list when get information from libvirt * Eventlet monkeypatch must be done before anything * 028 migration script incorrectly skips over section * Fix bug in get\_capabilities behavior in DB drivers * Added documentation for selectable aggregates * Make sure use IPv6 sockets for ceilometer in IPv6 environment * VMware vSphere: Bug fixes * Ensure insecure config option propagated by alarm evaluator * Fix order of arguments in assertEquals * Fix order of arguments in assertEquals * Fix order of arguments in assertEquals * Rationalize get\_resources for mongodb * Ensure insecure config option propagated by alarm service * add host meters to doc * Add field translation to complex query from OldSample to Sample * Extend test case to cover old alarm style conversion * Updated doc with debug instructions * Refactored the way how testscenarios tests are run * Corrected the sample names in hardware pollsters * Prevent alarm\_id in query field of getting history * Make ceilometer work with sqla 0.9.x * Implements monitoring-network-from-opendaylight * Add user-supplied arguments in log\_handler * VMware vSphere support: Disk rates * Fix updating alarm can specify existing alarm name * Changes for networking metrics support for vSphere * VMware vSphere: Changes for cpu\_util * VMware vSphere support: Memory Usage * Fix broken statistics in sqlalchemy * Fixes Hyper-V Inspector network metrics values * Set storage engine for the trait\_type table * Enable monkeypatch for select module * Rename id to alarm\_id of Alarm in SqlAlchemy * Fix some spelling mistakes and a incorrect url * Skip central agent interval\_task when keystone fails * Ensure user metadata mapped for instance notifications * Per pipeline pluggable resource discovery * Wider selection of aggregates for sqlalchemy * Wider selection of aggregates for mongodb * Adds time constraints to alarms * Remove code duplication Part 3 * Decouple source and sink configuration for pipelines * Selectable aggregate support in mongodb * Selectable aggregation functions for statistics * Add simple capabilities API * Removed global state modification by api test * VMware vSphere support: Performance Mgr APIs * Fix typo * move databases to test requirements * Make recording and scanning data more determined * Implements "not" operator for complex query * Implements metadata query for complex query feature * Alarms support in HBase Part 2 * Alarm support in HBase Part 1 * Remove unused 
variable * Added hardware pollsters for the central agent * Added hardware agent's inspector and snmp implementation * Updated from global requirements * Pluggable resource discovery for agents * Remove code duplication Part 2 * Imported Translations from Transifex * remove audit logging on flush * Tolerate absent recorded\_at on older mongo/db2 samples * api: export recorded\_at in returned samples * Fix the way how metadata is stored in HBase * Set default log level of iso8601 to WARN * Sync latest config file generator from oslo-incubator * Fix typo on testing doc page * Remove code duplication * sample table contains redundant/duplicate data * rename meter table to sample * storage: store recording timestamp * Fixed spelling error in Ceilometer * Adds doc string to query validate functions in V2 API * Updated from global requirements * Remove code that works around a (now-resolved) bug in pecan * Fix missing source field content on /v2/samples API * Refactor timestamp existence validation in V2 API * Use the module units to refer bytes type * sync units.py from oslo to ceilometer * Add comments for \_build\_paginate\_query * Implements monitoring-network * Handle Heat notifications for stack CRUD * Alembic migrations not tested * Modify the discription of combination alarm * check domain state before inspecting nics/disks * Adds gettextutils module in converter * Keep py3.X compatibility for urllib.urlencode * Added missing import * Removed useless prints that pollute tests log * Implements in operator for complex query functionality * Implements field validation for complex query functionality * allow hacking to set dependencies * Implements complex query functionality for alarm history * Implements complex query functionality for alarms * Remove None for dict.get() * Replace assertEqual(None, \*) with assertIsNone in tests * Update notification\_driver * Switch over to oslosphinx * Fix some flaws in ceilometer docstrings * Rename Openstack to OpenStack * Remove start index 0 in range() * Updated from global requirements * Remove blank line in docstring * Use six.moves.urllib.parse instead of urlparse * Propogate cacert and insecure flags to glanceclient * Test case for creating an alarm without auth headers * Refactored run-tests script * Implements complex query functionality for samples * fix column name and alignment * Remove tox locale overrides * Updated from global requirements * Adds flavor\_id in the nova\_notifier * Improve help strings * service: re-enable eventlet just for sockets * Fixes invalid key in Neutron notifications * Replace BoundedInt with WSME's IntegerType * Replace 'Ceilometer' by 'Telemetry' in the generated doc * Doc: Add OldSample to v2.rst * Fixing some simple documentation typos * Updated from global requirements * Fix for a simple typo * Replace 'a alarm' by 'an alarm' * Move ceilometer-send-counter to a console script * sync oslo common code * Handle engine creation inside of Connection object * Adds additional details to alarm notifications * Fix formating of compute-nova measurements table * Fix string-to-boolean casting in queries * nova notifier: disable tests + update sample conf * Update oslo * Refactored session access * Fix the py27 failure because of "ephemeral\_key\_uuid" error * Correct a misuse of RestController in the Event API * Fix docs on what an instance meter represents * Fix measurement docs to correctly represent Existance meters * samples: fix test case status code check * Replace non-ascii symbols in docs * Use swift master * Add 
table prefix for unit tests with hbase * Add documentation for pipeline configuration * Remove unnecessary code from alarm test * Updated from global requirements * Use stevedore's make\_test\_instance * use common code for migrations * Use explicit http error code for api v2 * Clean .gitignore * Remove unused db engine variable in api * Revert "Ensure we are not exhausting the sqlalchemy pool" * eventlet: stop monkey patching * Update dev docs to include notification-agent * Change meter\_id to meter\_name in generated docs * Correct spelling of logger for dispatcher.file * Fix some typos in architecture doc * Drop foreign key contraints of alarm in sqlalchemy * Re-enable lazy translation * Sync gettextutils from Oslo * Fix wrong doc string for meter type * Fix recursive\_keypairs output * Added abc.ABCMeta metaclass for abstract classes * Removes use of timeutils.set\_time\_override * tests: kill all started processes on exit * Exclude weak datapoints from alarm threshold evaluation * Move enable\_acl and debug config to ceilometer.conf * Fix the Alarm documentation of Web API V2 * StringIO compatibility for python3 * Set the SQL Float precision * Convert alarm timestamp to PrecisionTimestamp * use six.move.xrange replace xrange * Exit expirer earlier if db-ttl is disabled * Added resources support in pollster's interface * Improve consistency of help strings * assertTrue(isinstance) replace by assertIsInstance * Return trait type from Event api * Add new rate-based disk and network pipelines * Name and unit mapping for rate\_of\_change transformer * Update oslo * Remove dependencies on pep8, pyflakes and flake8 * Implement the /v2/samples/ API * Fix to handle null threshold\_rule values * Use DEFAULT section for dispatcher in doc * Insertion in HBase should be fixed * Trivial typo * Update ceilometer.conf.sample * Fix use the fact that empty sequences are false * Remove unused imports * Replace mongo aggregation with plain ol' map-reduce * Remove redundant meter (name,type,unit) tuples from Resource model * Fix work of udp publisher * tests: pass /dev/null as config for mongod * requirements: drop netaddr * tests: allow to skip if no database URL * Fix to tackle instances without an image assigned * Check for pep8 E226 and E24 * Fixed spelling mistake * AlarmChange definition added to doc/source/webapi/v2.rst * 1st & last sample timestamps in Resource representation * Avoid false negatives on message signature comparison * cacert is not picked up correctly by alarm services * Change endpoint\_type parameter * Utilizes assertIsNone and assertIsNotNone * Add missing gettextutils import to ceilometer.storage.base * Remove redundant code in nova\_client.Client * Allow customized reseller\_prefix in Ceilometer middleware for Swift * Fix broken i18n support * Empty files should no longer contain copyright * Add Event API * Ensure we are not exhausting the sqlalchemy pool * Add new meters for swift * Sync config generator workaround from oslo * storage: factorize not implemented methods * Don't assume alarms are returned in insert order * Correct env variable in file oslo.config.generator.rc * Handle the metrics sent by nova notifier * Add a wadl target to the documentation * Sync config generator from oslo-incubator * Convert event timestamp to PrecisionTimestamp * Add metadata query validation limitation * Ensure the correct error message is displayed * Imported Translations from Transifex * Move sphinxcontrib-httpdomain to test-requirements * Ensure that the user/project exist on alarm 
update * api: raise ClientSideError rather than ValueError * Implement the /v2/sample API * service: fix service alive checking * Oslo sync to recover from db2 server disconnects * Event Storage Layer * config: specify a template for mktemp * test code should be excluded from test coverage summary * doc: remove note about Nova plugin framework * doc: fix formatting of alarm action types * Updated from global requirements * Add configuration-driven conversion to Events * add newly added constraints to expire clear\_expired\_metering\_data * fix unit * Add import for publisher\_rpc option * add more test cases to improve the test code coverage #5 * Create a shared queue for QPID topic consumers * Properly reconnect subscribing clients when QPID broker restarts * Don't need session.flush in context managed by session * sql migration error in 020\_add\_metadata\_tables * Remove rpc service from agent manager * Imported Translations from Transifex * organise requirements files * Add a Trait Type model and db table * No module named MySQLdb bug * Add a note about permissions to ceilometer logging directory * sync with oslo-incubator * Rename OpenStack Metering to OpenStack Telemetry * update docs to adjust for naming change * Add i18n warpping for all LOG messages * Imported Translations from Transifex * Removed unused method in compute agent manger * connection is not close in migration script * Fixed a bug in sql migration script 020 * Fixed nova notifier test * Added resources definition in the pipeline * Change metadata\_int's value field to type bigint * Avoid intermittent integrity error on alarm creation * Simplify the dispatcher method prototype * Use map\_method from stevedore 0.12 * Remove the collector submodule * Move dispatcher a level up * Split collector * Add a specialized Event Type model and db table * Remove old sqlalchemy-migrate workaround * Revert "Support building wheels (PEP-427)" * full pep8 compliance (part 2) * Selectively import RPC backend retry config * Fixes Hyper-V Inspector disk metrics bug * Imported Translations from Transifex * full pep8 compliance (part1) * Replace mox with mock in alarm,central,image tests * Stop ignoring H506 errors * Update hacking for real * Replace mox with mock in tests.collector * Replace mox with mock in publisher and pipeline * Replace mox with mock in novaclient and compute * Remove useless defined Exception in tests * Support building wheels (PEP-427) * Fixes Hyper-V Inspector cpu metrics bug * Replace mox with mock in tests.storage * Document user-defined metadata for swift samples * Replace mox with mock in energy and objectstore * Updated from global requirements * Replace mox with mock in tests.api.v2 * Refactor API error handling * make record\_metering\_data concurrency safe * Move tests into ceilometer module * Replace mox with mock in tests.api.v1 * Replace mox with mock in tests.api.v2.test\_compute * Corrected import order * Use better predicates from testtools instead of plain assert * Stop using openstack.common.exception * Replace mox with mock in tests.network * Replace mox with mocks in test\_inspector * Fix failing nova\_tests tests * Replace mox with mocks in tests.compute.pollsters * Add an insecure option for Keystone client * Sync log from oslo * Cleanup tests.publisher tests * mongodb, db2: do not print full URL in logs * Use wsme ClientSideError to handle unicode string * Use consistant cache key for swift pollster * Fix the developer documentation of the alarm API * Fix the default rpc policy value * Allow 
Events without traits to be returned * Replace tests.base part8 * Replace tests.base part7 * Replace tests.base part6 * Imported Translations from Transifex * Imported Translations from Transifex * Sync log\_handler from Oslo * Don't use sqlachemy Metadata as global var * enable sql metadata query * Replace tests.base part5 * Replace tests.base part4 * Imported Translations from Transifex * Updated from global requirements * Fix doc typo in volume meter description * Updated from global requirements * Add source to Resource API object * compute: virt: Fix Instance creation * Fix for get\_resources with postgresql * Updated from global requirements * Add tests when admin set alarm owner to its own * Replace tests.base part3 * Replace tests.base part2 * Replace tests.base part1 * Fix wrong using of Metadata in 15,16 migrations * api: update for WSME 0.5b6 compliance * Changes FakeMemcache to set token to expire on utcnow + 5 mins * Change test case get\_alarm\_history\_on\_create * Change alarm\_history.detail to text type * Add support for keystoneclient 0.4.0 * Ceilometer has no such project-list subcommand * Avoid leaking admin-ness into combination alarms * Updated from global requirements * Avoid leaking admin-ness into threshold-oriented alarms * Update Oslo * Set python-six minimum version * Ensure combination alarms can be evaluated * Ensure combination alarm evaluator can be loaded * Apply six for metaclass * add more test cases to improve the test code coverage #6 * Update python-ceilometerclient lower bound to 1.0.6 * Imported Translations from Transifex * add more test cases to improve the test code coverage #4 * db2 does not allow None as a key for user\_id in user collection * Start Icehouse development * Imported Translations from Transifex * Disable lazy translation * Add notifications for alarm changes * Updated from global requirements * api: allow alarm creation for others project by admins * assertEquals is deprecated, use assertEqual * Imported Translations from Transifex * update alarm service setup in dev doc * Add bug number of some wsme issue * api: remove useless comments * issue an error log when cannot import libvirt * add coverage config file to control module coverage report * tests: fix rounding issue in timestamp comparison * api: return 404 if a alarm is not found * remove locals() for stringformat * add more test cases to improve the test code coverage #3 * Remove extraneous vim configuration comments * Return 401 when action is not authorized * api: return 404 if a resource is not found * keystone client changes in AuthProtocol made our test cases failing * Don't load into alarms evaluators disabled alarms * Remove MANIFEST.in * Allow to get a disabled alarm * Add example with return values in API v2 docs * Avoid imposing alembic 6.0 requirement on all distros * tests: fix places check for timestamp equality * Don't publish samples if resource\_id in missing * Require oslo.config 1.2.0 final * Don't send unuseful rpc alarm notification * service: check that timestamps are almost equals * Test the response body when deleting a alarm * Change resource.resource\_metadata to text type * Adding region name to service credentials * Fail tests early if mongod is not found * add more test cases to improve the test code coverage #2 * add more test cases to improve the test code coverage #1 * Imported Translations from Transifex * Replace OpenStack LLC with OpenStack Foundation * Use built-in print() instead of print statement * Simple alarm partitioning protocol 
based on AMQP fanout RPC * Handle manually mandatory field * Provide new API endpoint for alarm state * Implement the combination evaluator * Add alarm combination API * Notify with string representation of alarm reason * Convert BoundedInt value from json into int * Fix for timestamp precision in SQLAlchemy * Add source field to Meter model * Refactor threshold evaluator * Alarm API update * Update requirements * WSME 0.5b5 breaking unit tests * Fix failed downgrade in migrations * refactor db2 get\_meter\_statistics method to support mongodb and db2 * tests: import pipeline config * Fix a tiny mistake in api doc * collector-udp: use dispatcher rather than storage * Imported Translations from Transifex * Drop sitepackages=False from tox.ini * Update sphinxcontrib-pecanwsme to 0.3 * Architecture enhancements * Force MySQL to use InnoDB/utf8 * Update alembic requirement to 0.6.0 version * Correctly output the sample content in the file publisher * Pecan assuming meter names are extensions * Handle inst not found exceptions in pollsters * Catch exceptions from nova client in poll\_and\_publish * doc: fix storage backend features status * Add timestamp filtering cases in storage tests * Imported Translations from Transifex * Use global openstack requirements * Add group by statistics examples in API v2 docs * Add docstrings to some methods * add tests for \_query\_to\_kwargs func * validate counter\_type when posting samples * Include auth\_token middleware in sample config * Update config generator * run-tests: fix MongoDB start wait * Imported Translations from Transifex * Fix handling of bad paths in Swift middleware * Drop the \*.create.start notification for Neutron * Make the Swift-related doc more explicit * Fix to return latest resource metadata * Update the high level architecture * Alarm history storage implementation for sqlalchemy * Improve libvirt vnic parsing with missing mac! * Handle missing libvirt vnic targets! 
* Make type guessing for query args more robust * add MAINTAINERS file * nova\_notifier: fix tests * Update openstack.common.policy from oslo-incubator * Clean-ups related to alarm history patches * Improved MongoClient pooling to avoid out of connections error * Disable the pymongo pooling feature for tests * Fix wrong migrations * Fixed nova notifier unit test * Add group by statistics in API v2 * Update to tox 1.6 and setup.py develop * Add query support to alarm history API * Reject duplicate events * Fixes a bug in Kwapi pollster * alarm api: rename counter\_name to meter\_name * Fixes service startup issue on Windows * Handle volume.resize.\* notifications * Network: process metering reports from Neutron * Alarm history storage implementation for mongodb * Fix migration with fkeys * Fixes two typos in this measurements.rst * Add a fake UUID to Meter on API level * Append /usr/sbin:/sbin to the path for searching mongodb * Plug alarm history logic into the API * Added upper version boundry for six * db2 distinct call results are different from mongodb call * Sync rpc from oslo-incubator * Imported Translations from Transifex * Add pagination parameter to the database backends of storage * Base Alarm history persistence model * Fix empty metadata issue of instance * alarm: generate alarm\_id in API * Import middleware from Oslo * Imported Translations from Transifex * Adds group by statistics for MongoDB driver * Fix wrong UniqueConstraint name * Adds else and TODO in statistics storage tests * Imported Translations from Transifex * Extra indexes cleanup * API FunctionalTest class lacks doc strings * install manual last few sections format needs to be fixed * api: update v1 for Flask >= 0.10 * Use system locale when Accept-Language header is not provided * Adds Hyper-V compute inspector * missing resource in middleware notification * Support for wildcard in pipeline * Refactored storage tests to use testscenarios * doc: replace GitHub by git.openstack.org * api: allow usage of resource\_metadata in query * Remove useless doc/requirements * Fixes non-string metadata query issue * rpc: reduce sleep time * Move sqlachemy tests only in test\_impl\_sqlachemy * Raise Error when pagination/groupby is missing * Raise Error when pagination support is missing * Use timeutils.utcnow in alarm threshold evaluation * db2 support * plugin: remove is\_enabled * Doc: improve doc about Nova measurements * Storing events via dispatchers * Imported Translations from Transifex * ceilometer-agent-compute did not catch exception for disk error * Change counter to sample in network tests * Change counter to sample in objectstore tests * Remove no more used code in test\_notifier * Change counter to sample vocable in cm.transformer * Change counter to sample vocable in cm.publisher * Change counter to sample vocable in cm.image * Change counter to sample vocable in cm.compute * Change counter to sample vocable in cm.energy * Use samples vocable in cm.publisher.test * Change counter to sample vocable in volume tests * Change counter to sample vocable in api tests * Add the source=None to from\_notification * Make RPCPublisher flush method threadsafe * Enhance delayed message translation when \_ is imported * Remove use\_greenlets argument to MongoClient * Enable concurrency on nova notifier tests * Imported Translations from Transifex * Close database connection for alembic env * Fix typo in 17738166b91 migration * Don't call publisher without sample * message\_id is not allowed to be submitted via api * Api V2 
post sample refactoring * Add SQLAlchemy implementation of groupby * Fixes failed notification when deleting instance * Reinitialize pipeline manager for service restart * Sync gettextutils from oslo-incubator * Doc: clearly state that one can filter on metadata * Add HTTP request/reply samples * Use new olso fixture in CM tests * Imported Translations from Transifex * Bump hacking to 0.7.0 * Fix the dict type metadata missing issue * Raise error when period with negative value * Imported Translations from Transifex * Import missing gettext \_ * Remove 'counter' occurences in pipeline * Remove the mongo auth warning during tests * Change the error message of resource listing in mongodb * Change test\_post\_alarm case in test\_alarm\_scenarios * Skeletal alarm history API * Reorg alarms controller to facilitate history API * Fix Jenkins failed due to missing \_ * Fix nova test\_notifier wrt new notifier API * Remove counter occurences from documentation * Updated from global requirements * Fixes dict metadata query issue of HBase * s/alarm/alarm\_id/ in alarm notification * Remove unused abstract class definitions * Removed unused self.counters in storage test class * Initial alarming documentation * Include previous state in alarm notification * Consume notification from the default queue * Change meter.resource\_metadata column type * Remove MongoDB TTL support for MongoDB < 2.2 * Add first and last sample timestamp * Use MongoDB aggregate to get resources list * Fix resources/meters pagination test * Handle more Nova and Neutron events * Add support for API message localization * Add the alarm id to the rest notifier body * fix alarm notifier tests * Sync gettextutils from oslo * Fix generating coverage on MacOSX * Use the new nova Instance class * Return message\_id in POSTed samples * rpc: remove source argument from message conversion * Remove source as a publisher argument * Add repeat\_actions to alarm * Rename get\_counters to get\_samples * Add pagination support for MongoDB * Doc: measurements: add doc on Cinder/Swift config * Update nova\_client.py * objectstore: trivial cleanup in \_Base * Add support for CA authentication in Keystone * add unit attribute to statistics * Fix notify method signature on LogAlarmNotifier * Fix transformer's LOG TypeError * Update openstack.common * Fixes Hbase metadata query return wrong result * Fix Hacking 0.6 warnings * Make middleware.py Python 2.6 compatible * Call alembic migrations after sqlalchemy-migrate * Rename ceilometer.counter to ceilometer.sample * Added separate MongoDB database for each test * Relax OpenStack upper capping of client versions * Refactored MongoDB connection pool to use weakrefs * Centralized backends tests scenarios in one place * Added tests to verify that local time is correctly handled * Refactored impl\_mongodb to use full connection url * calling distinct on \_id field against a collection is slow * Use configured endpoint\_type everywhere * Allow use of local conductor * Update nova configuration doc to use notify\_on\_state\_change * doc: how to inject user-defined data * Add documentation on nova user defined metadata * Refactored API V2 tests to use testscenarios * Refactored API V1 tests to use testscenarios * alarm: Per user setting to disable ssl verify * alarm: Global setting to disable ssl verification * Imported Translations from Transifex * Implementation of the alarm RPCAlarmNotifier * Always init cfg.CONF before running a test * Sets storage\_conn in CollectorService * Remove replace/preserve 
logic from rate of change transformer * storage: remove per-driver options * hbase: do not register table\_prefix as a global option * mongodb: do not set replica\_set as a global option * Change nose to testr in the documentation * Fixed timestamp creation in MongoDB mapreduce * Ensure url is a string for requests.post * Implement a https:// in REST alarm notification * Implement dot in matching\_metadata key for mongodb * trailing slash in url causes 404 error * Fix missing foreign keys * Add cleanup migration for indexes * Sync models with migrations * Avoid dropping cpu\_util for multiple instances * doc: /statistics fields are not queryable (you cannot filter on them) * fix resource\_metadata failure missing image data * Standardize on X-Project-Id over X-Tenant-Id * Default to ctx user/project ID in sample POST API * Multiple dispatcher enablement * storage: fix clear/upgrade order * Lose weight for Ceilometer log in verbose mode * publisher.rpc: queing policies * Remove useless mongodb connection pool comment * Add index for db.meter by descending timestamp * doc: add a bunch of functional examples for the API * api: build the storage connection once and for all * Fix the argument of UnknownArgument exception * make publisher procedure call configurable * Disable mongod prealloc, wait for it to start * Added alembic migrations * Allow to enable time to live on metering sample * Implement a basic REST alarm notification * Imported Translations from Transifex * Ensure correct return code of run-tests.sh * File based publisher * Unset OS\_xx variable before generate configuration * Use run-tests.sh for tox coverage tests * Emit cpu\_util from transformer instead of pollster * Allow simpler scale exprs in transformer.conversions * Use a real MongoDB instance to run unit tests * Allow to specify the endpoint type to use * Rename README.md to README.rst * Use correct hostname to get instances * Provide CPU number as additional metadata * Remove get\_counter\_names from the pollster plugins * Sync SQLAlchemy models with migrations * Transformer to measure rate of change * Make sure plugins are named after their meters * Break up the swift pollsters * Split up the glance pollsters * Make visual coding style consistent * Separate power and energy pollsters * Break up compute pollsters * Implement a basic alarm notification service * Optionally store Events in Collector * Fix issue with pip installing oslo.config-1.2.0 * Transformer to convert between units * publisher.rpc: make per counter topic optional * ceilometer tests need to be enabled/cleaned * Also accept timeout parameter in FakeMemCache * Fix MongoDB backward compat wrt units * Use oslo.sphinx and remove local copy of doc theme * Reference setuptools and not distribute * enable v2 api hbase tests * Register all interesting events * Unify Counter generation from notifications * doc: enhance v2 examples * Update glossary * Imported Translations from Transifex * Imported Translations from Transifex * Filter query op:gt does not work as expected * sqlalchemy: fix performance issue on get\_meters() * enable v2 api sqlalchemy tests * Update compute vnic pollster to use cache * Update compute CPU pollster to use cache * Update compute disk I/O pollster to use cache * update Quantum references to Neutron * Update swift pollster to use cache * Update kwapi pollster to use cache * Update floating-ip pollster to use cache * Update glance pollster to use cache * Add pollster data cache * Fix flake8 errors * Update Oslo * Enable Ceilometer to 
support mongodb replication set * Fix return error when resource can't be found * Simple service for singleton threshold eval * Basic alarm threshold evaluation logic * add metadata to nova\_client results * Bring in oslo-common rpc ack() changes * Pin the keystone client version * Fix auth logic for PUT /v2/alarms * Imported Translations from Transifex * Change period type in alarms API to int * mongodb: fix limit value not being an integer * Check that the config file sample is always up to date * api: enable v2 tests on SQLAlchemy & HBase * Remove useless periodic\_interval option * doc: be more explicit about network counters * Capture instance metadata in reserved namespace * Imported Translations from Transifex * pep8: enable E125 checks * pep8: enable F403 checks * pep8: enable H302 checks * pep8: enable H304 checks * pep8: enable H401 * pep8: enable H402 checks * Rename the MeterPublisher to RPCPublisher * Replace publisher name by URL * Enable pep8 H403 checks * Activate H404 checks * Ceilometer may generate wrong format swift url in some situations * Code cleanup * Update Oslo * Use Flake8 gating for bin/ceilometer-\* * Update requirements to fix devstack installation * Update to the latest stevedore * Start gating on H703 * Remove disabled\_notification\_listeners option * Remove disabled\_compute\_pollsters option * Remove disabled\_central\_pollsters option * Longer string columns for Trait and UniqueNames * Fix nova notifier tests * pipeline: switch publisher loading model to driver * Enforce reverse time-order for sample return * Remove explicit distribute depend * Use Python 3.x compatible octal literals * Improve Python 3.x compatibility * Fix requirements * Corrected path for test requirements in docs * Fix some typo in documentation * Add instance\_scheduled in entry points * fix session connection * Remove useless imports, reenable F401 checks * service: run common initialization stuff * Use console scripts for ceilometer-api * Use console scripts for ceilometer-dbsync * Use console scripts for ceilometer-agent-compute * Use console scripts for ceilometer-agent-central * agent-central: use CONF.import\_opt rather than import * Move os\_\* options into a group * Use console scripts for ceilometer-collector * sqlalchemy: migration error when running db-sync * session flushing error * api: add limit parameters to meters * python3: Introduce py33 to tox.ini * Start to use Hacking * Session does not use ceilometer.conf's database\_connection * Add support for limiting the number of samples returned * Imported Translations from Transifex * Add support policy to installation instructions * sql: fix 003 downgrade * service: remove useless PeriodicService class * Fix nova notifier tests * Explicitly set downloadcache in tox.ini * Imported Translations from Transifex * Switch to sphinxcontrib-pecanwsme for API docs * Update oslo, use new configuration generator * doc: fix hyphens instead of underscores for 'os\*' conf options * Allow specifying a listen IP * Log configuration values on API startup * Don't use pecan to configure logging * Mark sensitive config options as secret * Imported Translations from Transifex * ImagePollster record duplicate counter during one poll * Rename requires files to standard names * Add an UDP publisher and receiver * hbase metaquery support * Imported Translations from Transifex * Fix and update extract\_opts group extraction * Fix the sample name of 'resource\_metadata' * Added missing source variable in storage drivers * Add Event methods to db 
api * vnics: don't presume existence of filterref/filter * force the test path to a str (sometimes is unicode) * Make sure that v2 api tests have the policy file configured * Imported Translations from Transifex * setup.cfg misses swift filter * Add a counter for instance scheduling * Move recursive\_keypairs into utils * Replace nose with testr * Use fixtures in the tests * fix compute units in measurement doc * Allow suppression of v1 API * Restore default interval * Change from unittest to testtools * remove unused tests/skip module * Imported Translations from Transifex * Get all tests to use tests.base.TestCase * Allow just a bit longer to wait for the server to startup * Document keystone\_authtoken section * Restore test dependency on Ming * Set the default pipline config file for tests * Imported Translations from Transifex * Fix cross-document references * Fix config setting references in API tests * Restrict pep8 & co to pep8 target * Fix meter\_publisher in setup.cfg * Use flake8 instead of pep8 * Imported Translations from Transifex * Use sqlalchemy session code from oslo * Switch to pbr * fix the broken ceilometer.conf.sample link * Add a direct Ceilometer notifier * Do the same auth checks in the v2 API as in the v1 API * Add the sqlalchemy implementation of the alarms collection * Allow posting samples via the rest API (v2) * Updated the ceilometer.conf.sample * Don't use trivial alarm\_id's like "1" in the test cases * Fix the nova notifier tests after a nova rename * Document HBase configuration * alarm: fix MongoDB alarm id * Use jsonutils instead of json in test/api.py * Connect the Alarm API to the db * Add the mongo implementation of alarms collection * Move meter signature computing into meter\_publish * Update WSME dependency * Imported Translations from Transifex * Add Alarm DB API and models * Imported Translations from Transifex * Remove "extras" again * add links to return values from API methods * Modify limitation on request version * Doc improvements * Rename EventFilter to SampleFilter * Fixes AttributeError of FloatingIPPollster * Add just the most minimal alarm API * Update oslo before bringing in exceptions * Enumerate the meter type in the API Meter class * Remove "extras" as it is not used * Adds examples of CLI and API queries to the V2 documentation * Measurements documentation update * update the ceilometer.conf.sample * Set hbase table\_prefix default to None * glance/cinder/quantum counter units are not accurate/consistent * Add some recommendations about database * Pin SQLAlchemy to 0.7.x * Ceilometer configuration.rst file not using right param names for logging * Fix require\_map\_reduce mim import * Extend swift middleware to collect number of requests * instances: fix counter unit * Remove Folsom support * transformer, publisher: move down base plugin classes * pipeline, publisher, transformer: reorganize code * Fix tests after nova changes * Update to the lastest loopingcall from oslo * Imported Translations from Transifex * update devstack instructions for cinder * Update openstack.common * Reformat openstack-common.conf * storage: move nose out of global imports * storage: get rid of get\_event\_interval * Remove gettext.install from ceilometer/\_\_init\_\_.py * Prepare for future i18n use of \_() in nova notifier * Update part of openstack.common * Convert storage drivers to return models * Adpated to nova's gettext changes * add v2 query examples * storage: remove get\_volume\_sum and get\_volume\_max * api: run tests against HBase too * 
api: run sum unit tests against SQL backend too * Split and fix live db tests * Remove impl\_test * api: run max\_resource\_volume test on SQL backend * Refactor DB tests * fix volume tests to utilize VOLUME\_DELETE notification * Open havana development, bump to 2013.2 * Change the column counter\_volume to Float * tests: disable Ming test if Ming unavailable * Imported Translations from Transifex * enable arguments in tox * api: run max\_volume tests on SQL backend too * api: run list\_sources tests on SQL and Mongo backend * api: run list\_resources test against SQL * api: handle case where metadata is None * Fix statistics period computing with start/end time * Allow publishing arbitrary headers via the "storage.objects.\*.bytes" counter * Updated the description of get\_counters routine * enable xml error message response * Swift pollster silently return no counter if keystone endpoint is not present * Try to get rid of the "events" & "raw events" naming in the code * Switch to python-keystoneclient 0.2.3 * include a copy of the ASL 2.0 * add keystone configuration instructions to manual install docs * Update openstack.common * remove unused dependencies * Set the default\_log\_levels to include keystoneclient * Switch to final 1.1.0 oslo.config release * Add deprecation warnings for V1 API * Raise stevedore requirement to 0.7 * Fixed the blocking unittest issues * Fix a pep/hacking error in a swift import * Add sample configuration files for mod\_wsgi * Add a tox target for building documentation * Use a non-standard port for the test server * Ensure the statistics are sorted * Start both v1 and v2 api from one daemon * Handle missing units values in mongodb data * Imported Translations from Transifex * Make HACKING compliant * Update manual installation instructions * Fix oslo.config and unittest * Return something sane from the log impl * Fix an invalid test in the storage test suite * Add the etc directory to the sdist manifest * api: run compute duration by resource on SQL backend * api: run list\_projects tests against SQL backend too * api: run list users test against SQL backend too * api: run list meters tests against SQL backend too * Kwapi pollster silently return no probre if keystone endpoint is not present * HBase storage driver, initial version * Exclude tests directory from installation * Ensure missing period is treated consistently * Exclude tests when installing ceilometer * Run some APIv1 tests on different backends * Remove old configuration metering\_storage\_engine * Set where=tests * Decouple the nova notifier from ceilometer code * send-counter: fix & test * Remove nose wrapper script * Fix count type in MongoDB * Make sure that the period is returned as an int as the api expects an int * Imported Translations from Transifex * Remove compat cfg wrapper * compute: fix unknown flavor handling * Allow empty dict as metaquery param for sqlalchemy * Add glossary definitions for additional terms * Support different publisher interval * Fix message envelope keys * Revert recent rpc wire format changes * Document the rules for units * Fix a bug in compute manager test case * plugin: don't use @staticmethod with abc * Support list/tuple as meter message value * Imported Translations from Transifex * Update common to get new kombu serialization code * Disable notifier tests * pipeline: manager publish multiple counters * Imported Translations from Transifex * Use oslo-config-2013.1b3 * mongodb: make count an integer explicitely * tests: allow to run API tests on live db 
* Update to latest oslo-version * Imported Translations from Transifex * Add directive to MANIFEST.in to include all the html files * Use join\_consumer\_pool() for notifications * Update openstack.common * Add period support in storage drivers and API * Update openstack/common tree * storage: fix mongo live tests * swift: configure RPC service correctly * Fix tox python version for Folsom * api: use delta\_seconds() * transformer: add acculumator transformer * Import service when cfg.CONF.os\_\* is used * pipeline: flush after publishing call * plugin: format docstring as rst * Use Mongo finalize to compute avg and duration * Code cleanup, remove useless import * api: fix a test * compute: fix notifications test * Move counter\_source definition * Allow to publish several counters in a row * Fixed resource api in v2-api * Update meter publish with pipeline framework * Use the same Keystone client instance for pollster * pipeline: fix format error in logging * More robust mocking of nova conductor * Mock more conductor API methods to unblock tests * Update pollsters to return counter list * Update V2 API documentation * Added hacking.py support to pep8 portion of tox * setup: fix typo in package data * Fix formatting issue with v1 API parameters * Multiple publisher pipeline framework * Remove setuptools\_git from setup\_requires * Removed unused param for get\_counters() * Use WSME 0.5b1 * Factorize agent code * Fixed the TemplateNotFound error in v1 api * Ceilometer-api is crashing due to pecan module missing * Clean class variable in compute manager test case * Update nova notifier test after nova change * Fix documentation formatting issues * Simplify ceilometer-api and checks Keystone middleware parsing * Fix nova conf compute\_manager unavailable * Rename run\_tests.sh to wrap\_nosetests.sh * Update openstack.common * Corrected get\_raw\_event() in sqlalchemy * Higher level test for db backends * Remove useless imports * Flatten the v2 API * Update v2 API for WSME code reorg * Update WebOb version specification * Remove the ImageSizePollster * Add Kwapi pollster (energy monitoring) * Fixes a minor documentation typo * Peg the version of Ming used in tests * Update pep8 to 1.3.3 * Remove leftover useless import * Enhance policy test for init() * Provide the meters unit's in /meters * Fix keystoneclient auth\_token middleware changes * policy: fix policy\_file finding * Remove the \_initialize\_config\_options * Add pyflakes * Make the v2 API date query parameters consistent * Fix test blocking issue and pin docutils version * Apply the official OpenStack stylesheets and templates to the Doc build * Fixed erroneous source filter in SQLAlchemy * Fix warnings in the documentation build * Handle finish and revert resize notifications * Add support for Folsom version of Swift * Implement user-api * Add support for Swift incoming/outgoing trafic metering * Pass a dict configuration file to auth\_keystone * Import only once in nova\_notifier * Fix MySQL charset error * Use default configuration file to make test data * Fix Glance control exchange * Move back api-v1 to the main api * Fix WSME arguments handling change * Remove useless gettext call in sql engine * Ground work for transifex-ify ceilometer * Add instance\_type information to NetPollster * Fix dbsync API change * Fix image\_id in instance resource metadata * Instantiate inspector in compute manager * remove direct nova db access from ceilometer * Make debugging the wsme app a bit easier * Implements database upgrade as storage 
engine independent * Fix the v1 api importing of acl * Add the ability to filter on metadata * Virt inspector directly layered over hypervisor API * Move meter.py into collector directory * Change mysql schema from latin1 to utf8 * Change default os-username to 'ceilometer' * Restore some metadata to the events and resources * Update documentation URL * Add sql db option to devstack for ceilometer * Remove debug print in V2 API * Start updating documentation for V2 API * Implement V2 API with Pecan and WSME * Move v1 API files into a subdirectory * Add test storage driver * Implement /meters to make discovery "nicer" from the client * Fix sqlalchemy for show\_data and v1 web api * Implement object store metering * Make Impl of mongodb and sqlalchemy consistent * add migration migrate.cfg file to the python package * Fixes to enable the jenkins doc job to work * Lower the minimum required version of anyjson * Fix blocking test for nova notifier * network: remove left-over useless nova import * tools: set novaclient minimum version * libvirt: fix Folsom compatibility * Lower pymongo dependency * Remove rickshaw subproject * Remove unused rpc import * Adapted to nova's compute\_driver moving * doc: fix cpu counter unit * tools: use tarballs rather than git for Folsom tests * Used auth\_token middleware from keystoneclient * Remove cinderclient dependency * Fix latest nova changes * api: replace minified files by complete version * Add Folsom tests to tox * Handle nova.flags removal * Provide default configuration file * Fix mysql\_engine option type * Remove nova.flags usage * api: add support for timestamp in \_list\_resources() * api: add timestamp interval support in \_list\_events() * tests: simplify api list\_resources * Update openstack.common(except policy) * Adopted the oslo's rpc.Service change * Use libvirt num\_cpu for CPU utilization calculation * Remove obsolete reference to instance.vcpus * Change references of /etc/ceilometer-{agent,collector}.conf to /etc/ceilometer/ceilometer.conf * Determine instance cores from public flavors API * Determine flavor type from the public nova API * Add comment about folsom compatibility change * Add keystone requirement for doc build * Avoid TypeError when loading libvirt.LibvirtDriver * Don't re-import flags and do parse\_args instead of flags.FLAGS() * doc: rename stackforge to openstack * Fix pymongo requirements * Update .gitreview for openstack * Update use of nova config to work with folsom * compute: remove get\_disks work-around * Use openstack versioning * Fix documentation build * document utc naive timestamp * Remove database access from agent pollsters * Fix merge error in central/manager.py * Fix nova config parsing * pollster trap error due to zero floating ip * Use the service.py in openstack-common * Allow no configured sources, provide a default file * Add service.py from openstack-common * Update common (except policy) * nova fake libvirt library breaking tests * Move db access out into a seperate file * Remove invalid fixme comments * Add new cpu\_util meter recording CPU utilization % * Fix TypeError from old-style publish\_counter calls * Fix auth middleware configuration * pin sqlalchemy to 0.7.x but not specifically 0.7.8 * add mongo index names * set tox to ignore global packages * Provide a way to disable some plugins * Use stevedore to load all plugins * implement get\_volume\_max for sqlalchemy * Add basic text/html renderer * network: floating IP account in Quantum * add unit test for CPUPollster * Clean up context 
usage * Add dependencies on clients used by pollsters * add ceilometer-send-counter * Update openstack.common.cfg * Fix tests broken by API change with Counter class * api: add source detail retrieval * Set source at publish time * Instance pollster emits instance. meter * timestamp columns in sqlalchemy not timezone aware * Remove obsolete/incorrect install instructions * network: emit router meter * Fix sqlalchemy performance problem * Added a working release-bugs.py script to tools/ * Change default API port * sqlalchemy record\_meter merge objs not string * Use glance public API as opposed to registry API * Add OpenStack trove classifier for PyPI * bump version number to 0.2 * Nova libvirt release note * Update metadata for PyPI registration * tox: add missing venv * Fixes a couple typos * Counter renaming * Set correct timestamp on floatingip counter * Fix API change in make\_test\_data.py * Fix Nova URL in doc * Some more doc fixes * Ignore instances in the ERROR state * Use the right version number in documentation * doc: fix network.\*.\* resource id * image: handle glance delete notifications * image: handle glance upload notifications * image: add update event, fix ImageServe owner * network: fix create/update counter type & doc * Assorted doc fixes * add max/sum project volume and fix tests * Add general options * compute.libvirt: split read/write counters * API: add Keystone ACL and policy support * Add documentation for configuration options * network: do not emit counter on exists event, fix resource id * Move net function in class method and fix instance id * Prime counter table * Fix the configuration for the nova notifier * Initialize the control\_exchange setting * Set version 0.1 * Make the instance counters use the same type * Restore manual install documentation * add quantum release note * Add release notes to docs * Update readme and create release notes * Remove duration field in Counter * Add counter for number of packets per vif * Move instance counter into its own pollster * Add a request counter for instance I/O * Rename instance disk I/O counter * Rename instances network counters * Use constant rather than string from counter type * Update the architecture diagram * Increase default polling interval * Fix compute agent publishing call * network: listen for Quantum exists event * Correct requirements filename * Fix notification subscription logic * Fix quantum notification subscriptions * Split meter publishing from the global config obj * network: add counter for actions * network: listen for Quantum notifications * Rename absolute to gauge * Fix typo in control exchanges help texts * Rework RPC notification mechanism * Update packaging files * Update URL list * Update openstack.common * Add volume/sum API endpoint for resource meters * Add resource volume/max api call * Fix dependency on anyjson * Listen for volume.delete.start instead of end * implement sqlalchemy dbengine backend * Add a notification handler for image downloads * Allow glance pollster tests to run * Create tox env definition for using a live db * Picking up dependencies from pip-requires file * Specify a new queue in manager * Rework RPC connection * Stop using nova's rpc module * Add configuration script to turn on notifications * Pep8 fixes, implement pep8 check on tests subdir * Use standard CLI options & env vars for creds * compute: remove get\_metadata\_from\_event() * Listen for volume notifications * Add pollster for Glance * Fix Nova notifier test case * Fix nova flag parsing * Add 
nova\_notifier notification driver for nova * Split instance polling code * Use stevedore to load storage engine drivers * Implement duration calculation API * Create tool for generating test meter data * Update openstack-common code to latest * Add bin/ceilometer-api for convenience * Add local copy of architecture diagram * Add timestamp parameters to the API docs * Check for doc build dependency before building * Pollster for network internal traffic (n1,n2) * Fix PEP8 issues * Add archicture diagram to documentation * added mongodb auth * Change timestamp management for resources * Log the instance causing the error when a pollster fails * Document how to install with devstack * Remove test skipping logic * Remove dependency on nova test modules * Add date range parameters to resource API * Add setuptools-git support * Add separate notification handler for instance flavor * Change instance meter type * Split the existing notification handlers up * Remove redundancy in the API * Separate the tox coverage test setup from py27 * Do not require user or project argument for event query * Add pymongo dependency for readthedocs.org build * Update openstack.common * Add API documentation * Be explicit about test dir * Add list projects API * Sort list of users and projects returned from queries * Add project arg to event and resource queries * Fix "meter" literal in event list API * collector exception on record\_metering\_data * Add API endpoint for listing raw event data * Change compute pollster API to work on one instance at a time * Create "central" agent * Skeleton for API server * fix use of source value in mongdb driver * Add {root,ephemeral}\_disk\_size counters * Implements vcpus counter * Fix nova configuration loading * Implements memory counter * Fix and document counter types * Check compute driver using new flag * Add openstack.common.{context,notifier,log} and update .rpc * Update review server link * Add link to roadmap * Add indexes to MongoDB driver * extend developer documentation * Reset the correct nova dependency URL * Switch .gitreview to use OpenStack gerrit * Add MongoDB engine * Convert timestamps to datetime objects before storing * Reduce complexity of storage engine API * Remove usage of nova.log * Documentation edits: * fix typo in instance properties list * Add Sphinx wrapper around existing docs * Configure nova.flags as well as openstack.common.cfg * First draft of plugin/agent documentation. 
Fixes bug 1018311 * Essex: update Nova to 2012.1.1, add python-novaclient * Split service preparation, periodic interval configurable * Use the same instance metadata everywhere * Emit meter event for instance "exists" * Start defining DB engine API * Fallback on nova.rpc for Essex * Add instance metadata from notification events * Combined fix to get past broken state of repo * Add more metadata to instance counter * Register storage options on import * Add Essex tests * log more than ceilometer * Remove event\_type field from meter messages * fix message signatures for nested dicts * Remove nova.flags usage * Copy openstack.common.cfg * check message signatures in the collector * Sketch out a plugin system for saving metering data * refactor meter event publishing code * Add and use ceilometer own log module * add counter type field * Use timestamp instead of datetime when creating Counter * Use new flag API * Fix a PEP8 error * Make the stand-alone test script mimic tox * Remove unneeded eventlet test requirement * Add listeners for other instance-related events * Add tox configuration * Use openstack.common.cfg for ceilometer options * Publish and receive metering messages * Add floating IP pollster * Fix tests based on DB by importing nova.tests * make the pollsters in the agent plugins * Build ceilometer-agent and ceilometer-collector * Add plugin support to the notification portion of the collector daemon * Add CPU time fetching * Add an example function for converting a nova notification to a counter * add a tool for recording notifications and replaying them * Add an exception handler to deal with errors that occur when the info in nova is out of sync with reality (as on my currently broken system). Also adds a nova prefix to the logger for now so messages from this module make it into the log file * Periodically fetch for disk io stats * Use nova.service, add a manager class * Change license to Apache 2.0 * Add setup.py * Import ceilometer-nova-compute * Ignore pyc files * Add link to blueprint * Add .gitreview file * initial commit ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/HACKING.rst0000664000175000017500000000203300000000000014517 0ustar00zuulzuul00000000000000Panko Style Commandments ======================== - Step 1: Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ - Step 2: Read on Panko Specific Commandments --------------------------- - [C301] LOG.warn() is not allowed. Use LOG.warning() - [C302] Deprecated library function os.popen() Creating Unit Tests ------------------- For every new feature, unit tests should be created that both test and (implicitly) document the usage of said feature. If submitting a patch for a bug that had no unit test, a new passing unit test should be added. If a submitted bug fix does have a unit test, be sure to add a new one that fails without the patch and passes with the patch. All unittest classes must ultimately inherit from testtools.TestCase. All setUp and tearDown methods must upcall using the super() method. tearDown methods should be avoided and addCleanup calls should be preferred. Never manually create tempfiles. Always use the tempfile fixtures from the fixture library to ensure that they are cleaned up. 
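As a minimal sketch of a unit test that follows these commandments (the class, helper, and attribute names below are illustrative, not taken from the Panko tree)::

    import fixtures
    import testtools


    class TestExample(testtools.TestCase):

        def setUp(self):
            # setUp must upcall using super().
            super(TestExample, self).setUp()
            # Prefer addCleanup over a tearDown method.
            self.addCleanup(self._reset)
            # Never create tempfiles by hand; use the fixtures library.
            self.temp_dir = self.useFixture(fixtures.TempDir()).path
            self.state = 'ready'

        def _reset(self):
            self.state = None

        def test_temp_dir_exists(self):
            self.assertTrue(self.temp_dir)
            self.assertEqual('ready', self.state)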
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/LICENSE0000664000175000017500000002363700000000000013743 0ustar00zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. 
Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/MAINTAINERS0000664000175000017500000000062600000000000014424 0ustar00zuulzuul00000000000000= Generalist Code Reviewers = The current members of panko-core are listed here: https://launchpad.net/~panko-drivers/+members#active This group can +2 and approve patches in Panko. However, they may choose to seek feedback from the appropriate specialist maintainer before approving a patch if it is in any way controversial or risky. 
= IRC handles of maintainers = gordc jd__ liusheng pradk sileht ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4901726 panko-10.0.0/PKG-INFO0000664000175000017500000000413400000000000014022 0ustar00zuulzuul00000000000000Metadata-Version: 1.2 Name: panko Version: 10.0.0 Summary: Event storage publisher and API for Ceilometer Home-page: https://docs.openstack.org/panko/latest/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: panko ===== The Panko project is an event storage service that provides the ability to store and query event data generated by Ceilometer and, potentially, other sources. Panko is a component of the OpenStack Telemetry project. ------------- Documentation ------------- Documentation for the project can be found at: https://docs.openstack.org/panko/latest/ Launchpad Projects ------------------ - Server: https://launchpad.net/panko - Client: https://launchpad.net/python-pankoclient Code Repository --------------- - Server: https://github.com/openstack/panko - Client: https://github.com/openstack/python-pankoclient Bug Tracking ------------ - Bugs: https://storyboard.openstack.org/#!/project/openstack/panko IRC --- IRC Channel: #openstack-telemetry on Freenode. Release notes ------------- Release notes: https://docs.openstack.org/releasenotes/panko/ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Topic :: System :: Monitoring Requires-Python: >=3.6 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/README.rst0000664000175000017500000000154600000000000014420 0ustar00zuulzuul00000000000000panko ===== The Panko project is an event storage service that provides the ability to store and query event data generated by Ceilometer and, potentially, other sources. Panko is a component of the OpenStack Telemetry project. ------------- Documentation ------------- Documentation for the project can be found at: https://docs.openstack.org/panko/latest/ Launchpad Projects ------------------ - Server: https://launchpad.net/panko - Client: https://launchpad.net/python-pankoclient Code Repository --------------- - Server: https://github.com/openstack/panko - Client: https://github.com/openstack/python-pankoclient Bug Tracking ------------ - Bugs: https://storyboard.openstack.org/#!/project/openstack/panko IRC --- IRC Channel: #openstack-telemetry on Freenode.
Release notes ------------- Release notes: https://docs.openstack.org/releasenotes/panko/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/babel.cfg0000664000175000017500000000002100000000000014442 0ustar00zuulzuul00000000000000[python: **.py] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/bindep.txt0000664000175000017500000000043500000000000014727 0ustar00zuulzuul00000000000000mongodb [platform:dpkg] mongodb-server [platform:rpm] mysql-server mysql-client [platform:dpkg] mysql [platform:rpm] postgresql postgresql-client [platform:dpkg] postgresql-devel [platform:rpm] postgresql-server [platform:rpm] libpq-dev [platform:dpkg] python37 [platform:rpm py37] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4581726 panko-10.0.0/devstack/0000775000175000017500000000000000000000000014527 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/devstack/README.rst0000664000175000017500000000143500000000000016221 0ustar00zuulzuul00000000000000========================== Enabling Panko in DevStack ========================== 1. Download Devstack:: git clone https://opendev.org/openstack/devstack cd devstack 2. Add this repo as an external repository in the ``local.conf`` file:: [[local|localrc]] enable_plugin panko https://opendev.org/openstack/panko To use stable branches, make sure devstack is on that branch, and specify the branch name to enable_plugin, for example:: enable_plugin panko https://opendev.org/openstack/panko stable/newton There are some options, such as PANKO_BACKEND, defined in ``panko/devstack/settings``; they can be used to configure the installation of Panko. If you don't want to use their default values, you can set new ones in ``local.conf``. 3. Run ``stack.sh``. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/devstack/apache-panko.template0000664000175000017500000000073600000000000020615 0ustar00zuulzuul00000000000000Listen %PORT% <VirtualHost *:%PORT%> WSGIDaemonProcess panko-api processes=2 threads=10 user=%USER% display-name=%{GROUP} %VIRTUALENV% WSGIProcessGroup panko-api WSGIScriptAlias / %WSGIAPP% WSGIApplicationGroup %{GLOBAL} <IfVersion >= 2.4> ErrorLogFormat "%{cu}t %M" </IfVersion> ErrorLog /var/log/%APACHE_NAME%/panko.log CustomLog /var/log/%APACHE_NAME%/panko_access.log combined </VirtualHost> WSGISocketPrefix /var/run/%APACHE_NAME% ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/devstack/lib/0000775000175000017500000000000000000000000015275 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/devstack/lib/elasticsearch.sh0000664000175000017500000001116600000000000020450 0ustar00zuulzuul00000000000000#!/bin/bash -xe # basic reference point for things like filecache # # TODO(sdague): once we have a few of these I imagine the download # step can probably be factored out to something nicer TOP_DIR=$(cd $(dirname "$0")/.. && pwd) FILES=$TOP_DIR/files source $TOP_DIR/stackrc # Package source and version, all pkg files are expected to have # something like this, as well as a way to override them.
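# Illustrative override only (the version and URL shown here are assumptions,
# not part of this tree): because the assignments below use the
# ${VAR:-default} form, either value can be overridden from the environment
# before this script is invoked, for example:
#
#   ELASTICSEARCH_VERSION=1.7.6 \
#   ELASTICSEARCH_BASEURL=https://mirror.example.org/elasticsearch \
#   ./devstack/lib/elasticsearch.sh download
#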
ELASTICSEARCH_VERSION=${ELASTICSEARCH_VERSION:-1.7.5} ELASTICSEARCH_BASEURL=${ELASTICSEARCH_BASEURL:-https://download.elasticsearch.org/elasticsearch/elasticsearch} # Elastic search actual implementation function wget_elasticsearch { local file=${1} if [ ! -f ${FILES}/${file} ]; then wget $ELASTICSEARCH_BASEURL/${file} -O ${FILES}/${file} fi if [ ! -f ${FILES}/${file}.sha1.txt ]; then wget $ELASTICSEARCH_BASEURL/${file}.sha1.txt -O ${FILES}/${file}.sha1.txt fi pushd ${FILES}; sha1sum ${file} > ${file}.sha1.gen; popd if ! diff ${FILES}/${file}.sha1.gen ${FILES}/${file}.sha1.txt; then echo "Invalid elasticsearch download. Could not install." return 1 fi return 0 } function download_elasticsearch { if is_ubuntu; then wget_elasticsearch elasticsearch-${ELASTICSEARCH_VERSION}.deb elif is_fedora || is_suse; then wget_elasticsearch elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm fi } function configure_elasticsearch { # currently a no op : } function _check_elasticsearch_ready { # poll elasticsearch to see if it's started if ! wait_for_service 120 http://localhost:9200; then die $LINENO "Maximum timeout reached. Could not connect to ElasticSearch" fi } function start_elasticsearch { if is_ubuntu; then sudo /etc/init.d/elasticsearch start _check_elasticsearch_ready elif is_fedora; then sudo /bin/systemctl start elasticsearch.service _check_elasticsearch_ready elif is_suse; then sudo /usr/bin/systemctl start elasticsearch.service _check_elasticsearch_ready else echo "Unsupported architecture...can not start elasticsearch." fi } function stop_elasticsearch { if is_ubuntu; then sudo /etc/init.d/elasticsearch stop elif is_fedora; then sudo /bin/systemctl stop elasticsearch.service elif is_suse ; then sudo /usr/bin/systemctl stop elasticsearch.service else echo "Unsupported architecture...can not stop elasticsearch." fi } function install_elasticsearch { pip_install_gr elasticsearch if is_package_installed elasticsearch; then echo "Note: elasticsearch was already installed." return fi if is_ubuntu; then if [[ ${DISTRO} == "bionic" ]]; then is_package_installed openjdk-8-jre-headless || install_package openjdk-8-jre-headless else is_package_installed default-jre-headless || install_package default-jre-headless fi sudo dpkg -i ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.deb sudo update-rc.d elasticsearch defaults 95 10 elif is_fedora; then is_package_installed java-1.8.0-openjdk-headless || install_package java-1.8.0-openjdk-headless yum_install ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm sudo /bin/systemctl daemon-reload sudo /bin/systemctl enable elasticsearch.service elif is_suse; then is_package_installed java-1_8_0-openjdk-headless || install_package java-1_8_0-openjdk-headless zypper_install --no-gpg-checks ${FILES}/elasticsearch-${ELASTICSEARCH_VERSION}.noarch.rpm sudo /usr/bin/systemctl daemon-reload sudo /usr/bin/systemctl enable elasticsearch.service else echo "Unsupported install of elasticsearch on this architecture." fi } function uninstall_elasticsearch { if is_package_installed elasticsearch; then if is_ubuntu; then sudo apt-get purge elasticsearch elif is_fedora; then sudo yum remove elasticsearch elif is_suse; then sudo zypper rm elasticsearch else echo "Unsupported install of elasticsearch on this architecture." fi fi } # The PHASE dispatcher. All pkg files are expected to basically cargo # cult the case statement. 
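# Usage sketch: each invocation runs exactly one phase through the case
# statement below. The devstack plugin in this tree calls the download and
# install phases in this way; the other phases follow the same pattern
# (the literal paths here are illustrative):
#
#   $PANKO_DIR/devstack/lib/elasticsearch.sh download
#   $PANKO_DIR/devstack/lib/elasticsearch.sh install
#   $PANKO_DIR/devstack/lib/elasticsearch.sh start
#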
PHASE=$1 echo "Phase is $PHASE" case $PHASE in download) download_elasticsearch ;; install) install_elasticsearch ;; configure) configure_elasticsearch ;; start) start_elasticsearch ;; stop) stop_elasticsearch ;; uninstall) uninstall_elasticsearch ;; esac ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/devstack/plugin.sh0000664000175000017500000002505100000000000016364 0ustar00zuulzuul00000000000000# Install and start **Panko** service in devstack # # To enable Panko in devstack add an entry to local.conf that # looks like # # [[local|localrc]] # enable_plugin panko https://opendev.org/openstack/panko # # Several variables set in the localrc section adjust common behaviors # of Panko (see within for additional settings): # # PANKO_BACKEND: Database backend (e.g. 'mysql', 'mongodb', 'es') # Save trace setting XTRACE=$(set +o | grep xtrace) set -o xtrace # Support potential entry-points console scripts in VENV or not if [[ ${USE_VENV} = True ]]; then PROJECT_VENV["panko"]=${PANKO_DIR}.venv PANKO_BIN_DIR=${PROJECT_VENV["panko"]}/bin else PANKO_BIN_DIR=$(get_python_exec_prefix) fi if [ -z "$PANKO_DEPLOY" ]; then # Default PANKO_DEPLOY=simple # Fallback to common wsgi devstack configuration if [ "$ENABLE_HTTPD_MOD_WSGI_SERVICES" == "True" ]; then PANKO_DEPLOY=mod_wsgi # Deprecated config elif [ -n "$PANKO_USE_MOD_WSGI" ] ; then echo_summary "PANKO_USE_MOD_WSGI is deprecated, use PANKO_DEPLOY instead" if [ "$PANKO_USE_MOD_WSGI" == True ]; then PANKO_DEPLOY=mod_wsgi fi fi fi function panko_service_url { echo "$PANKO_SERVICE_PROTOCOL://$PANKO_SERVICE_HOST:$PANKO_SERVICE_PORT" } # _panko_install_mongdb - Install mongodb and python lib. function _panko_install_mongodb { # Server package is the same on all local packages=mongodb-server if is_fedora; then # mongodb client packages="${packages} mongodb" fi install_package ${packages} if is_fedora; then restart_service mongod else restart_service mongodb fi # give time for service to restart sleep 5 } # Configure mod_wsgi function _panko_config_apache_wsgi { sudo mkdir -p $PANKO_WSGI_DIR local panko_apache_conf=$(apache_site_config_for panko) local venv_path="" # Copy proxy vhost and wsgi file sudo cp $PANKO_DIR/panko/api/app.wsgi $PANKO_WSGI_DIR/app if [[ ${USE_VENV} = True ]]; then venv_path="python-path=${PROJECT_VENV["panko"]}/lib/$(python_version)/site-packages" fi sudo cp $PANKO_DIR/devstack/apache-panko.template $panko_apache_conf sudo sed -e " s|%PORT%|$PANKO_SERVICE_PORT|g; s|%APACHE_NAME%|$APACHE_NAME|g; s|%WSGIAPP%|$PANKO_WSGI_DIR/app|g; s|%USER%|$STACK_USER|g; s|%VIRTUALENV%|$venv_path|g " -i $panko_apache_conf } # Install required services for storage backends function _panko_prepare_storage_backend { if [ "$PANKO_BACKEND" = 'mongodb' ] ; then pip_install_gr pymongo _panko_install_mongodb fi if [ "$PANKO_BACKEND" = 'es' ] ; then $PANKO_DIR/devstack/lib/elasticsearch.sh download $PANKO_DIR/devstack/lib/elasticsearch.sh install fi } # Create panko related accounts in Keystone function _panko_create_accounts { if is_service_enabled panko-api; then create_service_user "panko" "admin" get_or_create_service "panko" "event" "OpenStack Telemetry Service" get_or_create_endpoint "event" \ "$REGION_NAME" \ "$(panko_service_url)" \ "$(panko_service_url)" \ "$(panko_service_url)" fi } # Activities to do before panko has been installed. function preinstall_panko { echo_summary "Preinstall not in virtualenv context. Skipping." 
} # Remove WSGI files, disable and remove Apache vhost file function _panko_cleanup_apache_wsgi { sudo rm -f "$PANKO_WSGI_DIR"/* sudo rmdir "$PANKO_WSGI_DIR" sudo rm -f $(apache_site_config_for panko) } function _panko_drop_database { if is_service_enabled panko-api ; then if [ "$PANKO_BACKEND" = 'mongodb' ] ; then mongo panko --eval "db.dropDatabase();" elif [ "$PANKO_BACKEND" = 'es' ] ; then curl -XDELETE "localhost:9200/events_*" fi fi } # cleanup_panko() - Remove residual data files, anything left over # from previous runs that a clean run would need to clean up function cleanup_panko { if [ "$PANKO_DEPLOY" == "mod_wsgi" ]; then _panko_cleanup_apache_wsgi fi _panko_drop_database sudo rm -f "$PANKO_CONF_DIR"/* sudo rmdir "$PANKO_CONF_DIR" } # Set configuration for storage backend. function _panko_configure_storage_backend { if [ "$PANKO_BACKEND" = 'mysql' ] || [ "$PANKO_BACKEND" = 'postgresql' ] ; then iniset $PANKO_CONF database connection $(database_connection_url panko) elif [ "$PANKO_BACKEND" = 'es' ] ; then iniset $PANKO_CONF database connection es://localhost:9200 ${TOP_DIR}/pkg/elasticsearch.sh start elif [ "$PANKO_BACKEND" = 'mongodb' ] ; then iniset $PANKO_CONF database connection mongodb://localhost:27017/panko else die $LINENO "Unable to configure unknown PANKO_BACKEND $PANKO_BACKEND" fi _panko_drop_database } # Configure Panko function configure_panko { local conffile iniset $PANKO_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" # Set up logging if [ "$SYSLOG" != "False" ]; then iniset $PANKO_CONF DEFAULT use_syslog "True" fi # Format logging if [ "$LOG_COLOR" == "True" ] && [ "$SYSLOG" == "False" ] && [ "$PANKO_DEPLOY" != "mod_wsgi" ]; then setup_colorized_logging $PANKO_CONF DEFAULT fi # Install the declarative configuration files to # the conf dir. # NOTE(cdent): Do not make this a glob as it will conflict # with rootwrap installation done elsewhere and also clobber # panko.conf settings that have already been made. # Anyway, explicit is better than implicit. cp $PANKO_DIR/etc/panko/api_paste.ini $PANKO_CONF_DIR configure_auth_token_middleware $PANKO_CONF panko $PANKO_AUTH_CACHE_DIR # Configure storage if is_service_enabled panko-api; then _panko_configure_storage_backend fi if is_service_enabled panko-api && [ "$PANKO_DEPLOY" == "mod_wsgi" ]; then _panko_config_apache_wsgi elif [ "$PANKO_DEPLOY" == "uwsgi" ]; then # iniset creates these files when it's called if they don't exist. PANKO_UWSGI_FILE=$PANKO_CONF_DIR/panko-uwsgi.ini rm -f "$PANKO_UWSGI_FILE" iniset "$PANKO_UWSGI_FILE" uwsgi http $PANKO_SERVICE_HOST:$PANKO_SERVICE_PORT iniset "$PANKO_UWSGI_FILE" uwsgi wsgi-file "$PANKO_DIR/panko/api/app.wsgi" # This is running standalone iniset "$PANKO_UWSGI_FILE" uwsgi master true # Set die-on-term & exit-on-reload so that uwsgi shuts down iniset "$PANKO_UWSGI_FILE" uwsgi die-on-term true iniset "$PANKO_UWSGI_FILE" uwsgi exit-on-reload true iniset "$PANKO_UWSGI_FILE" uwsgi threads 10 iniset "$PANKO_UWSGI_FILE" uwsgi processes $API_WORKERS iniset "$PANKO_UWSGI_FILE" uwsgi enable-threads true iniset "$PANKO_UWSGI_FILE" uwsgi plugins python iniset "$PANKO_UWSGI_FILE" uwsgi lazy-apps true # uwsgi recommends this to prevent thundering herd on accept. iniset "$PANKO_UWSGI_FILE" uwsgi thunder-lock true # Override the default size for headers from the 4k default. iniset "$PANKO_UWSGI_FILE" uwsgi buffer-size 65535 # Make sure the client doesn't try to re-use the connection. iniset "$PANKO_UWSGI_FILE" uwsgi add-header "Connection: close" fi } # init_panko() - Initialize etc. 
function init_panko { # Get panko keystone settings in place _panko_create_accounts # Create cache dir sudo install -d -o $STACK_USER $PANKO_AUTH_CACHE_DIR rm -f $PANKO_AUTH_CACHE_DIR/* if is_service_enabled panko-api && is_service_enabled mysql postgresql ; then if [ "$PANKO_BACKEND" = 'mysql' ] || [ "$PANKO_BACKEND" = 'postgresql' ] || [ "$PANKO_BACKEND" = 'es' ] ; then recreate_database panko $PANKO_BIN_DIR/panko-dbsync fi fi } # Install Panko. function install_panko { if is_service_enabled panko-api; then _panko_prepare_storage_backend fi setup_develop $PANKO_DIR sudo install -d -o $STACK_USER -m 755 $PANKO_CONF_DIR if [ "$PANKO_DEPLOY" == "mod_wsgi" ]; then install_apache_wsgi elif [ "$PANKO_DEPLOY" == "uwsgi" ]; then pip_install uwsgi fi } # start_panko() - Start running processes, including screen function start_panko { if [[ "$PANKO_DEPLOY" == "mod_wsgi" ]]; then enable_apache_site panko restart_apache_server elif [ "$PANKO_DEPLOY" == "uwsgi" ]; then run_process panko-api "$PANKO_BIN_DIR/uwsgi $PANKO_UWSGI_FILE" else run_process panko-api "$PANKO_BIN_DIR/panko-api -d -v --config-file $PANKO_CONF" fi } # configure_tempest_for_panko() # NOTE (gmann): Configure all the Tempest setting for Panko service in # this function. function configure_tempest_for_panko { if is_service_enabled tempest; then iniset $TEMPEST_CONFIG service_available panko True fi } # stop_panko() - Stop running processes function stop_panko { if is_service_enabled panko-api ; then if [ "$PANKO_DEPLOY" == "mod_wsgi" ]; then disable_apache_site panko restart_apache_server else stop_process panko-api fi fi } # install_pankoclient() - Collect source and prepare function install_pankoclient { if use_library_from_git "python-pankoclient"; then git_clone_by_name "python-pankoclient" setup_dev_lib "python-pankoclient" else pip_install pankoclient fi } # This is the main for plugin.sh if is_service_enabled panko-api; then if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then # Set up other services echo_summary "Configuring system services for Panko" preinstall_panko elif [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Panko" # Use stack_install_service here to account for virtualenv stack_install_service panko install_pankoclient elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring Panko" configure_panko elif [[ "$1" == "stack" && "$2" == "extra" ]]; then echo_summary "Initializing Panko" # Tidy base for panko init_panko # Start the services start_panko elif [[ "$1" == "stack" && "$2" == "test-config" ]]; then echo_summary "Configuring Tempest for Panko" configure_tempest_for_panko fi if [[ "$1" == "unstack" ]]; then echo_summary "Shutting Down Panko" stop_panko fi if [[ "$1" == "clean" ]]; then echo_summary "Cleaning Panko" cleanup_panko fi fi # Restore xtrace $XTRACE ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/devstack/settings0000664000175000017500000000151600000000000016315 0ustar00zuulzuul00000000000000enable_service panko-api # Default directories PANKO_DIR=$DEST/panko PANKO_CONF_DIR=/etc/panko PANKO_CONF=$PANKO_CONF_DIR/panko.conf PANKO_AUTH_CACHE_DIR=${PANKO_AUTH_CACHE_DIR:-/var/cache/panko} PANKO_WSGI_DIR=${PANKO_WSGI_DIR:-/var/www/panko} # Set up database backend PANKO_BACKEND=${PANKO_BACKEND:-mysql} # Panko connection info. 
PANKO_SERVICE_PROTOCOL=http PANKO_SERVICE_HOST=$SERVICE_HOST PANKO_SERVICE_PORT=${PANKO_SERVICE_PORT:-8977} # PANKO_DEPLOY defines how Panko is deployed, allowed values: # - mod_wsgi: Run Panko under Apache HTTPd mod_wsgi # - simple: Run panko-api # - uwsgi: Run Panko under uwsgi # - : Fallback to PANKO_USE_MOD_WSGI or ENABLE_HTTPD_MOD_WSGI_SERVICES PANKO_DEPLOY=${PANKO_DEPLOY} # Get rid of this before done. # Tell emacs to use shell-script-mode ## Local variables: ## mode: shell-script ## End: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4621727 panko-10.0.0/devstack/upgrade/0000775000175000017500000000000000000000000016156 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/devstack/upgrade/settings0000664000175000017500000000046300000000000017744 0ustar00zuulzuul00000000000000register_project_for_upgrade panko devstack_localrc base enable_plugin panko https://opendev.org/openstack/panko devstack_localrc base enable_service panko-api tempest devstack_localrc target enable_plugin panko https://opendev.org/openstack/panko devstack_localrc target enable_service panko-api tempest ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/devstack/upgrade/shutdown.sh0000775000175000017500000000074600000000000020377 0ustar00zuulzuul00000000000000#!/bin/bash # # set -o errexit . $GRENADE_DIR/grenaderc . $GRENADE_DIR/functions . $BASE_DEVSTACK_DIR/functions . $BASE_DEVSTACK_DIR/stackrc # needed for status directory . $BASE_DEVSTACK_DIR/lib/tls . $BASE_DEVSTACK_DIR/lib/apache # Locate the panko plugin and get its functions PANKO_DEVSTACK_DIR=$(dirname $(dirname $0)) . $PANKO_DEVSTACK_DIR/plugin.sh set -o xtrace stop_panko # ensure everything is stopped SERVICES_DOWN="panko-api" ensure_services_stopped $SERVICES_DOWN ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/devstack/upgrade/upgrade.sh0000775000175000017500000000467600000000000020161 0ustar00zuulzuul00000000000000#!/usr/bin/env bash # ``upgrade-panko`` echo "*********************************************************************" echo "Begin $0" echo "*********************************************************************" # Clean up any resources that may be in use cleanup() { set +o errexit echo "*********************************************************************" echo "ERROR: Abort $0" echo "*********************************************************************" # Kill ourselves to signal any calling process trap 2; kill -2 $$ } trap cleanup SIGHUP SIGINT SIGTERM # Keep track of the grenade directory RUN_DIR=$(cd $(dirname "$0") && pwd) # Source params . $GRENADE_DIR/grenaderc # Import common functions . $GRENADE_DIR/functions # This script exits on an error so that errors don't compound and you see # only the first error that occurred. set -o errexit # Save mongodb state (replace with snapshot) # TODO(chdent): There used to be a 'register_db_to_save panko' # which may wish to consider putting back in. if grep -q 'connection *= *mongo' /etc/panko/panko.conf; then mongodump --db panko --out $SAVE_DIR/panko-dump.$BASE_RELEASE fi # Upgrade Panko # ================== # Locate panko devstack plugin, the directory above the # grenade plugin. PANKO_DEVSTACK_DIR=$(dirname $(dirname $0)) # Get functions from current DevStack . $TARGET_DEVSTACK_DIR/functions . 
$TARGET_DEVSTACK_DIR/stackrc . $TARGET_DEVSTACK_DIR/lib/apache # Get panko functions from devstack plugin . $PANKO_DEVSTACK_DIR/settings # Print the commands being run so that we can see the command that triggers # an error. set -o xtrace # Install the target panko . $PANKO_DEVSTACK_DIR/plugin.sh stack install # calls upgrade-panko for specific release upgrade_project panko $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH # Migrate the database # NOTE(chdent): As we evolve BIN_DIR is likely to be defined, but # currently it is not. PANKO_BIN_DIR=$(dirname $(which panko-dbsync)) $PANKO_BIN_DIR/panko-dbsync || die $LINENO "DB sync error" # Start Panko start_panko ensure_services_started panko-api # Save mongodb state (replace with snapshot) if grep -q 'connection *= *mongo' /etc/panko/panko.conf; then mongodump --db panko --out $SAVE_DIR/panko-dump.$TARGET_RELEASE fi set +o xtrace echo "*********************************************************************" echo "SUCCESS: End $0" echo "*********************************************************************" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4621727 panko-10.0.0/doc/0000775000175000017500000000000000000000000013470 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/doc/Makefile0000664000175000017500000001377700000000000015147 0ustar00zuulzuul00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " wadl to build a WADL file for api.openstack.org" clean: -rm -rf $(BUILDDIR)/* html: check-dependencies $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 
.PHONY: check-dependencies check-dependencies: @python -c 'import sphinxcontrib.autohttp.flask' >/dev/null 2>&1 || (echo "ERROR: Missing Sphinx dependencies. Run: pip install sphinxcontrib-httpdomain" && exit 1) @ld -ltidy >/dev/null 2>&1 || (echo "Error: Missing libtidy dependencies. Pls. install libtidy with system package manager" && exit 1) wadl: $(SPHINXBUILD) -b docbook $(ALLSPHINXOPTS) $(BUILDDIR)/wadl @echo @echo "Build finished. The WADL pages are in $(BUILDDIR)/wadl." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Panko.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Panko.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/Panko" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Panko" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." 
linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/doc/requirements.txt0000664000175000017500000000065100000000000016756 0ustar00zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. sphinx>=2.0.0,!=2.1.0 # BSD openstackdocstheme>=2.2.1 # Apache-2.0 sphinxcontrib-apidoc>=0.2.0 # Apache-2.0 sphinxcontrib-pecanwsme>=0.8.0 # Apache-2.0 sphinxcontrib-httpdomain>=1.6.1 # BSD reno>=3.1.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4621727 panko-10.0.0/doc/source/0000775000175000017500000000000000000000000014770 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4621727 panko-10.0.0/doc/source/api/0000775000175000017500000000000000000000000015541 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/doc/source/api/index.rst0000664000175000017500000000014000000000000017375 0ustar00zuulzuul00000000000000================= Source Code Index ================= .. toctree:: :maxdepth: 1 modules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/doc/source/conf.py0000664000175000017500000002305000000000000016267 0ustar00zuulzuul00000000000000# # Panko documentation build configuration file, created by # sphinx-quickstart on Thu Oct 27 11:38:59 2011. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../')) # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. # They can be extensions coming with Sphinx (named 'sphinx.ext.*') # or your custom ones. extensions = [ 'sphinxcontrib.apidoc', 'sphinx.ext.autodoc', 'sphinxcontrib.pecanwsme.rest', 'sphinxcontrib.httpdomain', 'openstackdocstheme', 'oslo_policy.sphinxpolicygen' ] policy_generator_config_file = '../../etc/panko/panko-policy-generator.conf' sample_policy_basename = '_static/panko' wsme_protocols = ['restjson', 'restxml'] todo_include_todos = True # Add any paths that contain templates here, relative to this directory. templates = [] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. 
#source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # openstackdocstheme options openstackdocs_repo_name = 'openstack/panko' openstackdocs_pdf_link = True openstackdocs_auto_name = False openstackdocs_bug_project = 'panko' openstackdocs_bug_tag = '' project = u'Panko' copyright = u'2012-2015, OpenStack Foundation' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['**/#*', '**~', '**/#*#'] # The reST default role (used for this markup: `text`) # to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] primary_domain = 'py' nitpicky = False # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme_path = ['.'] # html_theme = '_theme' html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = { # "nosidebar": "false" #} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%Y-%m-%d %H:%M' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. 
#html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Pankodoc' # -- Options for LaTeX output ------------------------------------------------- latex_elements = { 'makeindex': '', 'printindex': '', 'preamble': r'\setcounter{tocdepth}{3}', 'maxlistdepth': '10', } # Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664 latex_use_xindy = False # Disable smartquotes, they don't work in latex smartquotes_excludes = {'builders': ['latex']} latex_domain_indices = False # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'doc-panko.tex', u'Panko Documentation', u'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output ------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'panko', u'Panko Documentation', [u'OpenStack'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ----------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Panko', u'Panko Documentation', u'OpenStack', 'Panko', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # -- Options for Epub output -------------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'Panko' epub_author = u'OpenStack' epub_publisher = u'OpenStack' epub_copyright = u'2012-2015, OpenStack' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be an ISBN number # or the project homepage. 
#epub_identifier = '' # A unique identification for the text. #epub_uid = '' # A tuple containing the cover image and cover page html template filenames. #epub_cover = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. #epub_exclude_files = [] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True # -- sphinxcontrib.apidoc configuration -------------------------------------- apidoc_module_dir = '../../panko' apidoc_output_dir = 'api' apidoc_excluded_paths = [ 'tests', 'hacking', # happybase is not Python3 compatible, thus skip over them 'storage/hbase/*', 'storage/impl_hbase.py' ] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4621727 panko-10.0.0/doc/source/configuration/0000775000175000017500000000000000000000000017637 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/doc/source/configuration/sample_policy.rst0000664000175000017500000000137200000000000023234 0ustar00zuulzuul00000000000000=================== Panko Sample Policy =================== The following is a sample panko policy file that has been auto-generated from default policy values in code. If you're using the default policies, then the maintenance of this file is not necessary, and it should not be copied into a deployment. Doing so will result in duplicate policy definitions. It is here to help explain which policy operations protect specific panko APIs, but it is not suggested to copy and paste into a deployment unless you're planning on providing a different policy for an operation that is not the default. The sample policy file can also be viewed in :download:`file form <../_static/panko.policy.yaml.sample>`. .. literalinclude:: ../_static/panko.policy.yaml.sample ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4621727 panko-10.0.0/doc/source/contributor/0000775000175000017500000000000000000000000017342 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/doc/source/contributor/contributing.rst0000664000175000017500000000244400000000000022607 0ustar00zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _contributing: ===================== Contributing to Panko ===================== Panko follows the same workflow as other OpenStack projects. To start contributing to Panko, please follow the workflow found here_. .. 
_here: https://wiki.openstack.org/wiki/Gerrit_Workflow Project Hosting Details ======================= :Bug tracker: https://bugs.launchpad.net/panko :Mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev (prefix subjects with ``[Panko]`` for faster responses) :Contribution Guide: https://docs.openstack.org/panko/latest/contributor/index.html :Code Hosting: https://opendev.org/openstack/panko/ :Code Review: https://review.opendev.org/#/q/status:open+project:openstack/panko,n,z ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/doc/source/contributor/gmr.rst0000664000175000017500000000575300000000000020673 0ustar00zuulzuul00000000000000.. Copyright (c) 2014 OpenStack Foundation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Guru Meditation Reports ======================= Panko contains a mechanism whereby developers and system administrators can generate a report about the state of a running Panko executable. This report is called a *Guru Meditation Report* (*GMR* for short). Generating a GMR ---------------- A *GMR* can be generated by sending the *USR1* signal to any Panko process with support (see below). The *GMR* will then be outputted standard error for that particular process. For example, suppose that ``panko-api`` has process id ``8675``, and was run with ``2>/var/log/panko/panko-api.log``. Then, ``kill -USR1 8675`` will trigger the Guru Meditation report to be printed to ``/var/log/panko/panko-api.log``. Structure of a GMR ------------------ The *GMR* is designed to be extensible; any particular executable may add its own sections. However, the base *GMR* consists of several sections: Package Shows information about the package to which this process belongs, including version information Threads Shows stack traces and thread ids for each of the threads within this process Green Threads Shows stack traces for each of the green threads within this process (green threads don't have thread ids) Configuration Lists all the configuration options currently accessible via the CONF object for the current process Adding Support for GMRs to New Executables ------------------------------------------ Adding support for a *GMR* to a given executable is fairly easy. First import the module (currently residing in oslo-incubator), as well as the Panko version module: .. code-block:: python from oslo_reports import guru_meditation_report as gmr from panko import version Then, register any additional sections (optional): .. code-block:: python TextGuruMeditation.register_section('Some Special Section', some_section_generator) Finally (under main), before running the "main loop" of the executable (usually ``service.server(server)`` or something similar), register the *GMR* hook: .. code-block:: python TextGuruMeditation.setup_autorun(version) Extending the GMR ----------------- As mentioned above, additional sections can be added to the GMR for a particular executable. 
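Putting the pieces together, a minimal sketch of an executable's startup path
might look like the following (``some_section_generator`` is a placeholder for
whatever callable builds the section contents; the exact generator contract is
described in the oslo.reports documentation):

.. code-block:: python

   from oslo_reports import guru_meditation_report as gmr

   from panko import version


   def some_section_generator():
       # Placeholder only: return whatever data oslo.reports should
       # render for this section.
       return 'example section contents'


   def main():
       # Register the optional extra section, then install the USR1
       # report hook before starting the long-running service.
       gmr.TextGuruMeditation.register_section('Some Special Section',
                                               some_section_generator)
       gmr.TextGuruMeditation.setup_autorun(version)
       # ... start the service's main loop here ...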
For more information, see the inline documentation about oslo.reports: `oslo.reports `_ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/doc/source/contributor/index.rst0000664000175000017500000000051700000000000021206 0ustar00zuulzuul00000000000000================== Contribution Guide ================== In the Contribution Guide, you will find documented policies for developing with Panko. This includes the processes we use for bugs, contributor onboarding, core reviewer memberships, and other procedural items. .. toctree:: :maxdepth: 2 contributing testing gmr ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/doc/source/contributor/testing.rst0000664000175000017500000000527100000000000021556 0ustar00zuulzuul00000000000000.. Copyright 2012 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================= Running the Tests ================= Panko includes an extensive set of automated unit tests which are run through tox_. 1. Install ``tox``:: $ sudo pip install tox 2. On Ubuntu install ``mongodb`` and ``libmysqlclient-dev`` packages:: $ sudo apt-get install mongodb $ sudo apt-get install libmysqlclient-dev For Fedora20 there is no ``libmysqlclient-dev`` package, so you’ll need to install ``mariadb-devel.x86-64`` (or ``mariadb-devel.i386``) instead:: $ sudo yum install mongodb $ sudo yum install mariadb-devel.x86_64 3. Install the test dependencies:: $ sudo pip install -r /opt/stack/panko/test-requirements.txt 4. Run the unit and code-style tests:: $ cd /opt/stack/panko $ tox -e py27,pep8 As tox is a wrapper around testr, it also accepts the same flags as testr. See the `testr documentation`_ for details about these additional flags. .. _testr documentation: https://testrepository.readthedocs.org/en/latest/MANUAL.html Use a double hyphen to pass options to testr. For example, to run only tests under tests/api/v2:: $ tox -e py27 -- api.v2 To debug tests (ie. break into pdb debugger), you can use ''debug'' tox environment. Here's an example, passing the name of a test since you'll normally only want to run the test that hits your breakpoint:: $ tox -e debug panko.tests.test_bin For reference, the ``debug`` tox environment implements the instructions here: https://wiki.openstack.org/wiki/Testr#Debugging_.28pdb.29_Tests 5. There is a growing suite of tests which use a tool called `gabbi`_ to test and validate the behavior of the Panko API. These tests are run when using the usual ``py27`` tox target but if desired they can be run by themselves:: $ tox -e gabbi The YAML files used to drive the gabbi tests can be found in ``panko/tests/functional/gabbi/gabbits``. If you are adding to or adjusting the API you should consider adding tests here. .. _gabbi: https://gabbi.readthedocs.org/ .. seealso:: * tox_ .. 
_tox: http://tox.testrun.org/latest/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/doc/source/index.rst0000664000175000017500000000260200000000000016631 0ustar00zuulzuul00000000000000.. Copyright 2012 Nicolas Barcet for Canonical Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ===================================== Welcome to the Panko's documentation! ===================================== The Panko project is an event storage service that provides the ability to store and querying event data generated by Ceilometer with potentially other sources. Panko is a component of the Telemetry project. This documentation offers information on how Panko works and how to contribute to the project. Overview ======== .. toctree:: :maxdepth: 2 install/index contributor/index webapi/index api/index Sample Configuration Files ========================== .. toctree:: :maxdepth: 2 configuration/sample_policy .. only:: html Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4621727 panko-10.0.0/doc/source/install/0000775000175000017500000000000000000000000016436 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/doc/source/install/development.rst0000664000175000017500000000246400000000000021520 0ustar00zuulzuul00000000000000.. Copyright 2012 Nicolas Barcet for Canonical 2013 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================== Installing development sandbox ============================== Configuring devstack ==================== .. index:: double: installing; devstack 1. Download devstack_. 2. Create a ``local.conf`` file as input to devstack. 3. The panko services are not enabled by default, so they must be enabled in ``local.conf`` before running ``stack.sh``. This example ``local.conf`` file shows all of the settings required for panko:: [[local|localrc]] # Enable the Panko devstack plugin enable_plugin panko https://opendev.org/openstack/panko.git .. _devstack: http://www.devstack.org/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/doc/source/install/index.rst0000664000175000017500000000141500000000000020300 0ustar00zuulzuul00000000000000.. 
Copyright 2013 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _install: ================ Installing Panko ================ .. toctree:: :maxdepth: 2 development manual mod_wsgi uwsgi ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/doc/source/install/manual.rst0000664000175000017500000000752400000000000020455 0ustar00zuulzuul00000000000000.. Copyright 2012 Nicolas Barcet for Canonical 2013 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _installing_manually: =================== Installing Manually =================== Storage Backend Installation ============================ This step is a prerequisite for the collector and API services. You may use one of the listed database backends below to store Panko data. MongoDB ------- Follow the instructions to install the MongoDB_ package for your operating system, then start the service. The required minimum version of MongoDB is 2.4.x. You will also need to have pymongo_ 2.4 installed To use MongoDB as the storage backend, change the 'database' section in panko.conf as follows:: [database] connection = mongodb://username:password@host:27017/panko SQLalchemy-supported DBs ------------------------ You may alternatively use any SQLAlchemy-supported DB such as `PostgreSQL` or `MySQL`. To use MySQL as the storage backend, change the 'database' section in panko.conf as follows:: [database] connection = mysql+pymysql://username:password@host/panko?charset=utf8 .. _MongoDB: http://www.mongodb.org/ .. _pymongo: https://pypi.org/project/pymongo/ Installing the API Server ========================= .. index:: double: installing; API .. note:: The API server needs to be able to talk to keystone and panko's database. It is only required if you choose to store data in legacy database or if you inject new samples via REST API. 1. Clone the panko git repository to the server:: $ cd /opt/stack $ git clone https://opendev.org/openstack/panko.git 2. As a user with ``root`` permissions or ``sudo`` privileges, run the panko installer:: $ cd panko $ sudo python setup.py install 3. Create a service for panko in keystone:: $ openstack service create event --name panko \ --description "Panko Service" 4. Create an endpoint in keystone for panko:: $ openstack endpoint create $PANKO_SERVICE \ --region RegionOne \ --publicurl "http://$SERVICE_HOST:8977" \ --adminurl "http://$SERVICE_HOST:8977" \ --internalurl "http://$SERVICE_HOST:8977" .. 
note:: PANKO_SERVICE is the id of the service created by the first command and SERVICE_HOST is the host where the Panko API is running. The default port value for panko API is 8977. If the port value has been customized, adjust accordingly. 5. Choose and start the API server. Panko includes the ``panko-api`` command. This can be used to run the API server. For smaller or proof-of-concept installations this is a reasonable choice. For larger installations it is strongly recommended to install the API server in a WSGI host such as mod_wsgi (see :doc:`mod_wsgi`). Doing so will provide better performance and more options for making adjustments specific to the installation environment. If you are using the ``panko-api`` command it can be started as:: $ panko-api .. note:: The development version of the API server logs to stderr, so you may want to run this step using a screen session or other tool for maintaining a long-running program in the background. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/doc/source/install/mod_wsgi.rst0000664000175000017500000000202600000000000021000 0ustar00zuulzuul00000000000000.. Copyright 2013 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================== Installing the API behind mod_wsgi ================================== Panko comes with a few example files for configuring the API service to run behind Apache with ``mod_wsgi``. app.wsgi ======== The file ``panko/api/app.wsgi`` sets up the V2 API WSGI application. The file is installed with the rest of the panko application code, and should not need to be modified. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/doc/source/install/uwsgi.rst0000664000175000017500000000344000000000000020327 0ustar00zuulzuul00000000000000============================= Installing the API with uwsgi ============================= Panko comes with a few example files for configuring the API service to run behind Apache with ``mod_wsgi``. app.wsgi ======== The file ``panko/api/app.wsgi`` sets up the V2 API WSGI application. The file is installed with the rest of the Panko application code, and should not need to be modified. Example of uwsgi configuration file =================================== Create panko-uwsgi.ini file:: [uwsgi] http = 0.0.0.0:8041 wsgi-file = /panko/api/app.wsgi plugins = python # This is running standalone master = true # Set die-on-term & exit-on-reload so that uwsgi shuts down exit-on-reload = true die-on-term = true # uwsgi recommends this to prevent thundering herd on accept. thunder-lock = true # Override the default size for headers from the 4k default. (mainly for keystone token) buffer-size = 65535 enable-threads = true # Set the number of threads usually with the returns of command nproc threads = 8 # Make sure the client doesn't try to re-use the connection. 
add-header = Connection: close # Set uid and gip to an appropriate user on your server. In many # installations ``panko`` will be correct. uid = panko gid = panko Then start the uwsgi server:: uwsgi ./panko-uwsgi.ini Or start in background with:: uwsgi -d ./panko-uwsgi.ini Configuring with uwsgi-plugin-python on Debian/Ubuntu ===================================================== Install the Python plugin for uwsgi:: apt-get install uwsgi-plugin-python Run the server:: uwsgi_python --master --die-on-term --logto /var/log/panko/panko-api.log \ --http-socket :8042 --wsgi-file /usr/share/panko-common/app.wsgi ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4661727 panko-10.0.0/doc/source/webapi/0000775000175000017500000000000000000000000016237 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/doc/source/webapi/index.rst0000664000175000017500000000241300000000000020100 0ustar00zuulzuul00000000000000======= Web API ======= .. toctree:: :maxdepth: 2 v2 You can get API version list via request to endpoint root path. For example:: curl -H "X-AUTH-TOKEN: fa2ec18631f94039a5b9a8b4fe8f56ad" http://127.0.0.1:8977 Sample response:: { "versions": { "values": [ { "id": "v2", "links": [ { "href": "http://127.0.0.1:8977/v2", "rel": "self" }, { "href": "https://docs.openstack.org/", "rel": "describedby", "type": "text/html" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.telemetry-v2+json" }, { "base": "application/xml", "type": "application/vnd.openstack.telemetry-v2+xml" } ], "status": "stable", "updated": "2013-02-13T00:00:00Z" } ] } } ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/doc/source/webapi/v2.rst0000664000175000017500000000261600000000000017325 0ustar00zuulzuul00000000000000.. docbookrestapi ========== V2 Web API ========== Capabilities ============ The Capabilities API allows you to directly discover which functions from the V2 API functionality, including the selectable aggregate functions, are supported by the currently configured storage driver. A capabilities query returns a flattened dictionary of properties with associated boolean values - a 'False' or absent value means that the corresponding feature is not available in the backend. .. rest-controller:: panko.api.controllers.v2.capabilities:CapabilitiesController :webprefix: /v2/capabilities .. autoclass:: panko.api.controllers.v2.capabilities.Capabilities :members: :noindex: Events and Traits ================= .. rest-controller:: panko.api.controllers.v2.events:EventTypesController :webprefix: /v2/event_types .. rest-controller:: panko.api.controllers.v2.events:TraitsController :webprefix: /v2/event_types/(event_type)/traits .. rest-controller:: panko.api.controllers.v2.events:EventsController :webprefix: /v2/events .. autoclass:: panko.api.controllers.v2.events.Event :members: :noindex: .. autoclass:: panko.api.controllers.v2.events.Trait :members: :noindex: .. autoclass:: panko.api.controllers.v2.events.TraitDescription :members: :noindex: Filtering Queries ================= .. 
autoclass:: panko.api.controllers.v2.events.EventQuery :members: :noindex: ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4501727 panko-10.0.0/etc/0000775000175000017500000000000000000000000013476 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4661727 panko-10.0.0/etc/panko/0000775000175000017500000000000000000000000014606 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/etc/panko/api_paste.ini0000664000175000017500000000273700000000000017265 0ustar00zuulzuul00000000000000[composite:panko+noauth] use = egg:Paste#urlmap / = pankoversions_pipeline /healthcheck = healthcheck /v2 = pankov2_noauth_pipeline [composite:panko+keystone] use = egg:Paste#urlmap / = pankoversions_pipeline /healthcheck = healthcheck /v2 = pankov2_keystone_pipeline [pipeline:pankoversions_pipeline] pipeline = cors http_proxy_to_wsgi pankoversions [app:pankoversions] paste.app_factory = panko.api.app:app_factory root = panko.api.controllers.root.VersionsController [pipeline:pankov2_keystone_pipeline] pipeline = cors http_proxy_to_wsgi request_id osprofiler authtoken pankov2 [pipeline:pankov2_noauth_pipeline] pipeline = cors http_proxy_to_wsgi request_id osprofiler pankov2 [app:pankov2] paste.app_factory = panko.api.app:app_factory root = panko.api.controllers.v2.root.V2Controller [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory oslo_config_project = panko [filter:request_id] paste.filter_factory = oslo_middleware:RequestId.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = panko [filter:http_proxy_to_wsgi] paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory oslo_config_project = panko [filter:osprofiler] paste.filter_factory = panko.profiler:WsgiMiddleware.factory oslo_config_project = panko [app:healthcheck] paste.app_factory = oslo_middleware:Healthcheck.app_factory backends = disable_by_file disable_by_file_path = /etc/panko/healthcheck_disable ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/etc/panko/panko-config-generator.conf0000664000175000017500000000041200000000000022011 0ustar00zuulzuul00000000000000[DEFAULT] output_file = etc/panko/panko.conf wrap_width = 79 namespace = panko namespace = oslo.db namespace = oslo.log namespace = oslo.middleware.cors namespace = oslo.middleware.http_proxy_to_wsgi namespace = oslo.policy namespace = keystonemiddleware.auth_token ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/etc/panko/panko-policy-generator.conf0000664000175000017500000000010700000000000022044 0ustar00zuulzuul00000000000000[DEFAULT] output_file = etc/panko/policy.yaml.sample namespace = panko ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4661727 panko-10.0.0/panko/0000775000175000017500000000000000000000000014033 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/__init__.py0000664000175000017500000000146100000000000016146 0ustar00zuulzuul00000000000000# Copyright 2014 eNovance # # Licensed under the Apache License, Version 2.0 (the 
"License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class NotImplementedError(NotImplementedError): # FIXME(jd) This is used by WSME to return a correct HTTP code. We should # not expose it here but wrap our methods in the API to convert it to a # proper HTTP error. code = 501 ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4701726 panko-10.0.0/panko/api/0000775000175000017500000000000000000000000014604 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/api/__init__.py0000664000175000017500000000000000000000000016703 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/api/app.py0000664000175000017500000000461700000000000015746 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import uuid from oslo_config import cfg from oslo_log import log from paste import deploy import pecan from panko.api import hooks from panko.api import middleware from panko import service LOG = log.getLogger(__name__) def setup_app(root, conf): app_hooks = [hooks.ConfigHook(conf), hooks.DBHook(conf), hooks.TranslationHook()] return pecan.make_app( root, hooks=app_hooks, wrap_app=middleware.ParsableErrorMiddleware, guess_content_type_from_ext=False ) # NOTE(sileht): pastedeploy uses ConfigParser to handle # global_conf, since python 3 ConfigParser doesn't # allow to store object as config value, only strings are # permit, so to be able to pass an object created before paste load # the app, we store them into a global var. But the each loaded app # store it's configuration in unique key to be concurrency safe. 
global APPCONFIGS APPCONFIGS = {} def load_app(conf, appname='panko+keystone'): global APPCONFIGS # Build the WSGI app cfg_path = conf.api_paste_config if not os.path.isabs(cfg_path): cfg_path = conf.find_file(cfg_path) if cfg_path is None or not os.path.exists(cfg_path): raise cfg.ConfigFilesNotFoundError([conf.api_paste_config]) config = dict(conf=conf) configkey = str(uuid.uuid4()) APPCONFIGS[configkey] = config LOG.info("Full WSGI config used: %s" % cfg_path) return deploy.loadapp("config:" + cfg_path, name=appname, global_conf={'configkey': configkey}) def build_wsgi_app(argv=None): return load_app(service.prepare_service(argv=argv)) def app_factory(global_config, **local_conf): global APPCONFIGS conf = APPCONFIGS.get(global_config.get('configkey')) return setup_app(root=local_conf.get('root'), **conf) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/api/app.wsgi0000664000175000017500000000135700000000000016265 0ustar00zuulzuul00000000000000# -*- mode: python -*- # # Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Use this file for deploying the API under mod_wsgi.""" from panko.api import app application = app.build_wsgi_app(argv=[]) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4701726 panko-10.0.0/panko/api/controllers/0000775000175000017500000000000000000000000017152 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/api/controllers/__init__.py0000664000175000017500000000000000000000000021251 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/api/controllers/root.py0000664000175000017500000000337300000000000020515 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
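# This controller serves the version discovery document at the API root
# ("GET /"): a single v2 entry marked "stable", matching the sample response
# shown in doc/source/webapi/index.rst.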
import pecan MEDIA_TYPE_JSON = 'application/vnd.openstack.telemetry-%s+json' MEDIA_TYPE_XML = 'application/vnd.openstack.telemetry-%s+xml' class VersionsController(object): @pecan.expose('json') def index(self): base_url = pecan.request.application_url available = [{'tag': 'v2', 'date': '2013-02-13T00:00:00Z', }] collected = [version_descriptor(base_url, v['tag'], v['date']) for v in available] versions = {'versions': {'values': collected}} return versions def version_descriptor(base_url, version, released_on): url = version_url(base_url, version) return { 'id': version, 'links': [ {'href': url, 'rel': 'self', }, {'href': 'https://docs.openstack.org/', 'rel': 'describedby', 'type': 'text/html', }], 'media-types': [ {'base': 'application/json', 'type': MEDIA_TYPE_JSON % version, }, {'base': 'application/xml', 'type': MEDIA_TYPE_XML % version, }], 'status': 'stable', 'updated': released_on, } def version_url(base_url, version_number): return '%s/%s' % (base_url, version_number) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4701726 panko-10.0.0/panko/api/controllers/v2/0000775000175000017500000000000000000000000017501 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/api/controllers/v2/__init__.py0000664000175000017500000000000000000000000021600 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/api/controllers/v2/base.py0000664000175000017500000002002600000000000020765 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
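# Shared building blocks for the v2 controllers: the Query type defined below
# carries a field/op/value triple (op defaults to 'eq') and
# _get_value_as_type() converts the raw text value to one of the declared
# _supported_types (integer, float, string, boolean, datetime).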
import ast import datetime import functools import inspect from oslo_serialization import jsonutils from oslo_utils import strutils from oslo_utils import timeutils import pecan import wsme from wsme import types as wtypes from panko.i18n import _ operation_kind = ('lt', 'le', 'eq', 'ne', 'ge', 'gt') operation_kind_enum = wtypes.Enum(str, *operation_kind) class ClientSideError(wsme.exc.ClientSideError): def __init__(self, error, status_code=400): pecan.response.translatable_error = error super(ClientSideError, self).__init__(error, status_code) class EntityNotFound(ClientSideError): def __init__(self, entity, id): super(EntityNotFound, self).__init__( _("%(entity)s %(id)s Not Found") % {'entity': entity, 'id': id}, status_code=404) class ProjectNotAuthorized(ClientSideError): def __init__(self, id, aspect='project'): params = dict(aspect=aspect, id=id) super(ProjectNotAuthorized, self).__init__( _("Not Authorized to access %(aspect)s %(id)s") % params, status_code=401) class AdvEnum(wtypes.wsproperty): """Handle default and mandatory for wtypes.Enum.""" def __init__(self, name, *args, **kwargs): self._name = '_advenum_%s' % name self._default = kwargs.pop('default', None) mandatory = kwargs.pop('mandatory', False) enum = wtypes.Enum(*args, **kwargs) super(AdvEnum, self).__init__(datatype=enum, fget=self._get, fset=self._set, mandatory=mandatory) def _get(self, parent): if hasattr(parent, self._name): value = getattr(parent, self._name) return value or self._default return self._default def _set(self, parent, value): try: if self.datatype.validate(value): setattr(parent, self._name, value) except ValueError as e: raise wsme.exc.InvalidInput(self._name.replace('_advenum_', '', 1), value, e) class Base(wtypes.DynamicBase): @classmethod def from_db_model(cls, m): return cls(**(m.as_dict())) @classmethod def from_db_and_links(cls, m, links): return cls(links=links, **(m.as_dict())) def as_dict(self, db_model): valid_keys = inspect.getargspec(db_model.__init__)[0] if 'self' in valid_keys: valid_keys.remove('self') return self.as_dict_from_keys(valid_keys) def as_dict_from_keys(self, keys): return dict((k, getattr(self, k)) for k in keys if hasattr(self, k) and getattr(self, k) != wsme.Unset) class Query(Base): """Query filter.""" # The data types supported by the query. _supported_types = ['integer', 'float', 'string', 'boolean', 'datetime'] # Functions to convert the data field to the correct type. _type_converters = {'integer': int, 'float': float, 'boolean': functools.partial( strutils.bool_from_string, strict=True), 'string': str, 'datetime': timeutils.parse_isotime} _op = None # provide a default def get_op(self): return self._op or 'eq' def set_op(self, value): self._op = value field = wsme.wsattr(wtypes.text, mandatory=True) "The name of the field to test" # op = wsme.wsattr(operation_kind, default='eq') # this ^ doesn't seem to work. op = wsme.wsproperty(operation_kind_enum, get_op, set_op) "The comparison operator. Defaults to 'eq'." 
value = wsme.wsattr(wtypes.text, mandatory=True) "The value to compare against the stored data" type = wtypes.text "The data type of value to compare against the stored data" def __repr__(self): # for logging calls return '' % (self.field, self.op, self.value, self.type) @classmethod def sample(cls): return cls(field='resource_id', op='eq', value='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', type='string' ) def as_dict(self): return self.as_dict_from_keys(['field', 'op', 'type', 'value']) def _get_value_as_type(self, forced_type=None): """Convert metadata value to the specified data type. This method is called during metadata query to help convert the querying metadata to the data type specified by user. If there is no data type given, the metadata will be parsed by ast.literal_eval to try to do a smart converting. NOTE (flwang) Using "_" as prefix to avoid an InvocationError raised from wsmeext/sphinxext.py. It's OK to call it outside the Query class. Because the "public" side of that class is actually the outside of the API, and the "private" side is the API implementation. The method is only used in the API implementation, so it's OK. :returns: metadata value converted with the specified data type. """ type = forced_type or self.type try: converted_value = self.value if not type: try: converted_value = ast.literal_eval(self.value) except (ValueError, SyntaxError): # Unable to convert the metadata value automatically # let it default to self.value pass else: if type not in self._supported_types: # Types must be explicitly declared so the # correct type converter may be used. Subclasses # of Query may define _supported_types and # _type_converters to define their own types. raise TypeError() converted_value = self._type_converters[type](self.value) if isinstance(converted_value, datetime.datetime): converted_value = timeutils.normalize_time(converted_value) except ValueError: msg = (_('Unable to convert the value %(value)s' ' to the expected data type %(type)s.') % {'value': self.value, 'type': type}) raise ClientSideError(msg) except TypeError: msg = (_('The data type %(type)s is not supported. The supported' ' data type list is: %(supported)s') % {'type': type, 'supported': self._supported_types}) raise ClientSideError(msg) except Exception: msg = (_('Unexpected exception converting %(value)s to' ' the expected data type %(type)s.') % {'value': self.value, 'type': type}) raise ClientSideError(msg) return converted_value class JsonType(wtypes.UserType): """A simple JSON type.""" basetype = wtypes.text name = 'json' @staticmethod def validate(value): # check that value can be serialised jsonutils.dumps(value) return value ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/api/controllers/v2/capabilities.py0000664000175000017500000000503100000000000022503 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from panko.api.controllers.v2 import base from panko import profiler from panko import utils def _flatten_capabilities(capabilities): return dict((k, v) for k, v in utils.recursive_keypairs(capabilities)) @profiler.trace_cls('api') class Capabilities(base.Base): """A representation of the API and storage capabilities. Usually constrained by restrictions imposed by the storage driver. """ api = {wtypes.text: bool} "A flattened dictionary of API capabilities" event_storage = {wtypes.text: bool} "A flattened dictionary of event storage capabilities" @classmethod def sample(cls): return cls( api=_flatten_capabilities({ 'events': {'query': {'simple': True}}, }), event_storage=_flatten_capabilities( {'storage': {'production_ready': True}}), ) @profiler.trace_cls('api') class CapabilitiesController(rest.RestController): """Manages capabilities queries.""" @wsme_pecan.wsexpose(Capabilities) def get(self): """Returns a flattened dictionary of API capabilities. Capabilities supported by the currently configured storage driver. """ # variation in API capabilities is effectively determined by # the lack of strict feature parity across storage drivers conn = pecan.request.conn driver_capabilities = {'events': conn.get_capabilities()['events']} driver_perf = conn.get_storage_capabilities() return Capabilities(api=_flatten_capabilities(driver_capabilities), event_storage=_flatten_capabilities(driver_perf)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/api/controllers/v2/events.py0000664000175000017500000002765200000000000021373 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
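# Illustrative sketch of the flattening performed by _flatten_capabilities()
# above, assuming utils.recursive_keypairs() joins nested keys with ':':
#
#     _flatten_capabilities({'events': {'query': {'simple': True}}})
#     # => {'events:query:simple': True}
#
# so a GET /v2/capabilities response would look roughly like
# {"api": {"events:query:simple": true},
#  "event_storage": {"storage:production_ready": true}}.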
import datetime from oslo_log import log from oslo_utils import strutils import pecan from pecan import rest import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from panko.api.controllers.v2 import base from panko.api.controllers.v2 import utils as v2_utils from panko.api import rbac from panko.i18n import _ from panko import profiler from panko import storage from panko.storage import models as event_models LOG = log.getLogger(__name__) class TraitDescription(base.Base): """A description of a trait, with no associated value.""" type = wtypes.text "the data type, defaults to string" name = wtypes.text "the name of the trait" @classmethod def sample(cls): return cls(name='service', type='string' ) class EventQuery(base.Query): """Query arguments for Event Queries.""" _supported_types = ['integer', 'float', 'string', 'datetime'] type = wsme.wsattr(wtypes.text, default='string') "the type of the trait filter, defaults to string" field = wsme.wsattr(wtypes.text) ''' Name of the field to filter on. Can be either a trait name or field of an event. 1) Use start_timestamp/end_timestamp to filter on `generated` field. 2) Specify the 'all_tenants=True' query parameter to get all events for all projects, this is only allowed by admin users. ''' def __repr__(self): # for logging calls return '' % (self.field, self.op, self._get_value_as_type(), self.type) @classmethod def sample(cls): return cls(field="event_type", type="string", op="eq", value="compute.instance.create.start") class Trait(base.Base): """A Trait associated with an event.""" name = wtypes.text "The name of the trait" value = wtypes.text "the value of the trait" type = wtypes.text "the type of the trait (string, integer, float or datetime)" @staticmethod def _convert_storage_trait(trait): """Helper method to convert a storage model into an API trait instance. If an API trait instance is passed in, just return it. 
""" if isinstance(trait, Trait): return trait value = (str(trait.value) if not trait.dtype == event_models.Trait.DATETIME_TYPE else trait.value.isoformat()) trait_type = event_models.Trait.get_name_by_type(trait.dtype) return Trait(name=trait.name, type=trait_type, value=value) @classmethod def sample(cls): return cls(name='service', type='string', value='compute.hostname' ) class Event(base.Base): """A System event.""" message_id = wtypes.text "The message ID for the notification" event_type = wtypes.text "The type of the event" _traits = None def get_traits(self): return self._traits def set_traits(self, traits): self._traits = map(Trait._convert_storage_trait, traits) traits = wsme.wsproperty(wtypes.ArrayType(Trait), get_traits, set_traits) "Event specific properties" generated = datetime.datetime "The time the event occurred" raw = base.JsonType() "The raw copy of notification" @classmethod def sample(cls): return cls( event_type='compute.instance.update', generated=datetime.datetime(2015, 1, 1, 12, 0, 0, 0), message_id='94834db1-8f1b-404d-b2ec-c35901f1b7f0', traits={ Trait(name='request_id', value='req-4e2d67b8-31a4-48af-bb2f-9df72a353a72'), Trait(name='service', value='conductor.tem-devstack-01'), Trait(name='tenant_id', value='7f13f2b17917463b9ee21aa92c4b36d6') }, raw={'status': {'nested': 'started'}} ) def _build_rbac_query_filters(): filters = {'t_filter': [], 'admin_proj': None} # Returns user_id, proj_id for non-admins user_id, proj_id = rbac.get_limited_to(pecan.request.headers) # If non-admin, filter events by user and project if user_id and proj_id: filters['t_filter'].append({"key": "project_id", "string": proj_id, "op": "eq"}) filters['t_filter'].append({"key": "user_id", "string": user_id, "op": "eq"}) elif not user_id and not proj_id: filters['admin_proj'] = pecan.request.headers.get('X-Project-Id') return filters def _event_query_to_event_filter(q): evt_model_filter = { 'event_type': None, 'message_id': None, 'start_timestamp': None, 'end_timestamp': None } filters = _build_rbac_query_filters() traits_filter = filters['t_filter'] admin_proj = filters['admin_proj'] for i in q: if not i.op: i.op = 'eq' elif i.op not in base.operation_kind: error = (_('Operator %(operator)s is not supported. The supported' ' operators are: %(supported)s') % {'operator': i.op, 'supported': base.operation_kind}) raise base.ClientSideError(error) if i.field in evt_model_filter: if i.op != 'eq' and i.field in ('event_type', 'message_id'): error = (_('Operator %(operator)s is not supported. Only' ' `eq\' operator is available for field' ' %(field)s') % {'operator': i.op, 'field': i.field}) raise base.ClientSideError(error) if i.op != 'ge' and i.field == 'start_timestamp': error = (_('Operator %(operator)s is not supported. Only' ' `ge\' operator is available for field' ' %(field)s') % {'operator': i.op, 'field': i.field}) raise base.ClientSideError(error) if i.op != 'le' and i.field == 'end_timestamp': error = (_('Operator %(operator)s is not supported. 
Only' ' `le\' operator is available for field' ' %(field)s') % {'operator': i.op, 'field': i.field}) raise base.ClientSideError(error) evt_model_filter[i.field] = i.value elif i.field == 'all_tenants' and admin_proj: all_tenants = strutils.bool_from_string(i.value) if all_tenants: admin_proj = None else: trait_type = i.type or 'string' traits_filter.append({"key": i.field, trait_type: i._get_value_as_type(), "op": i.op}) return storage.EventFilter(traits_filter=traits_filter, admin_proj=admin_proj, **evt_model_filter) @profiler.trace_cls('api') class TraitsController(rest.RestController): """Works on Event Traits.""" @v2_utils.requires_admin @wsme_pecan.wsexpose([Trait], wtypes.text, wtypes.text) def get_one(self, event_type, trait_name): """Return all instances of a trait for an event type. :param event_type: Event type to filter traits by :param trait_name: Trait to return values for """ LOG.debug("Getting traits for %s", event_type) return [Trait._convert_storage_trait(t) for t in pecan.request.conn.get_traits(event_type, trait_name)] @v2_utils.requires_admin @wsme_pecan.wsexpose([TraitDescription], wtypes.text) def get_all(self, event_type): """Return all trait names for an event type. :param event_type: Event type to filter traits by """ get_trait_name = event_models.Trait.get_name_by_type return [TraitDescription(name=t['name'], type=get_trait_name(t['data_type'])) for t in pecan.request.conn.get_trait_types(event_type)] @profiler.trace_cls('api') class EventTypesController(rest.RestController): """Works on Event Types in the system.""" traits = TraitsController() @v2_utils.requires_admin @wsme_pecan.wsexpose(None, wtypes.text) def get_one(self, event_type): """Unused API, will always return 404. :param event_type: A event type """ pecan.abort(404) @v2_utils.requires_admin @wsme_pecan.wsexpose([str]) def get_all(self): """Get all event types.""" return list(pecan.request.conn.get_event_types()) @profiler.trace_cls('api') class EventsController(rest.RestController): """Works on Events.""" @v2_utils.requires_context @wsme_pecan.wsexpose([Event], [EventQuery], int, [str], str) def get_all(self, q=None, limit=None, sort=None, marker=None): """Return all events matching the query filters. :param q: Filter arguments for which Events to return :param limit: Maximum number of samples to be returned. :param sort: A pair of sort key and sort direction combined with ":" :param marker: The pagination query marker, message id of the last item viewed """ rbac.enforce("events:index", pecan.request) q = q or [] event_filter = _event_query_to_event_filter(q) pagination = v2_utils.set_pagination_options( sort, limit, marker, event_models.Event) return [Event(message_id=event.message_id, event_type=event.event_type, generated=event.generated, traits=event.traits, raw=event.raw) for event in pecan.request.conn.get_events(event_filter, pagination)] @v2_utils.requires_context @wsme_pecan.wsexpose(Event, wtypes.text) def get_one(self, message_id): """Return a single event with the given message id. 
:param message_id: Message ID of the Event to be returned """ rbac.enforce("events:show", pecan.request) filters = _build_rbac_query_filters() t_filter = filters['t_filter'] admin_proj = filters['admin_proj'] event_filter = storage.EventFilter(traits_filter=t_filter, admin_proj=admin_proj, message_id=message_id) events = [event for event in pecan.request.conn.get_events(event_filter)] if not events: raise base.EntityNotFound(_("Event"), message_id) if len(events) > 1: LOG.error(("More than one event with " "id %s returned from storage driver"), message_id) event = events[0] return Event(message_id=event.message_id, event_type=event.event_type, generated=event.generated, traits=event.traits, raw=event.raw) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/api/controllers/v2/root.py0000664000175000017500000000215000000000000021034 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from panko.api.controllers.v2 import capabilities from panko.api.controllers.v2 import events class V2Controller(object): """Version 2 API controller root.""" event_types = events.EventTypesController() events = events.EventsController() capabilities = capabilities.CapabilitiesController() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/api/controllers/v2/utils.py0000664000175000017500000001231100000000000021211 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import pecan import wsme from panko.api.controllers.v2 import base from panko.api import rbac def get_auth_project(on_behalf_of=None): auth_project = rbac.get_limited_to_project(pecan.request.headers) created_by = pecan.request.headers.get('X-Project-Id') is_admin = auth_project is None if is_admin and on_behalf_of != created_by: auth_project = on_behalf_of return auth_project # TODO(fabiog): this decorator should disappear and have a more unified # way of controlling access and scope. 
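# Illustrative sketch (hypothetical request, values are examples only): the
# EventsController above answers queries such as
#
#     GET /v2/events?q.field=event_type&q.op=eq
#         &q.value=compute.instance.create.start
#         &limit=10&sort=generated:desc
#
# which _event_query_to_event_filter() converts into a storage.EventFilter;
# admins may additionally pass q.field=all_tenants&q.value=True to drop the
# project scoping.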
Before messing with this, though # I feel this file should be re-factored in smaller chunks one for each # controller (e.g. meters and so on ...). Right now its size is # overwhelming. def requires_admin(func): @functools.wraps(func) def wrapped(*args, **kwargs): usr_limit, proj_limit = rbac.get_limited_to(pecan.request.headers) # If User and Project are None, you have full access. if usr_limit and proj_limit: # since this decorator get's called out of wsme context # raising exception results internal error so call abort # for handling the error ex = base.ProjectNotAuthorized(proj_limit) pecan.core.abort(status_code=ex.code, detail=ex.msg) return func(*args, **kwargs) return wrapped def requires_context(func): @functools.wraps(func) def wrapped(*args, **kwargs): req_usr = pecan.request.headers.get('X-User-Id') proj_usr = pecan.request.headers.get('X-Project-Id') if ((not req_usr) or (not proj_usr)): pecan.core.abort(status_code=403, detail='RBAC Authorization Failed') return func(*args, **kwargs) return wrapped def set_pagination_options(sort, limit, marker, api_model): """Sets the options for pagination specifying query options Arguments: sort -- List of sorting criteria. Each sorting option has to format : Valid sort keys: message_id, generated (SUPPORT_SORT_KEYS in panko/event/storage/models.py) Valid sort directions: asc (ascending), desc (descending) (SUPPORT_DIRS in panko/event/storage/models.py) This defaults to asc if unspecified (DEFAULT_DIR in panko/event/storage/models.py) impl_sqlalchemy.py: (see _get_pagination_query) If sort list is empty, this defaults to ['generated:asc', 'message_id:asc'] (DEFAULT_SORT in panko/event/storage/models.py) limit -- Integer specifying maximum number of values to return If unspecified, this defaults to pecan.request.cfg.api.default_api_return_limit marker -- If specified, assumed to be an integer and assumed to be the message id of the last object on the previous page of the results api_model -- Specifies the class implementing the api model to use for this pagination. 
The class is expected to provide the following members: SUPPORT_DIRS SUPPORT_SORT_KEYS DEFAULT_DIR DEFAULT_SORT PRIMARY_KEY """ if limit and limit <= 0: raise wsme.exc.InvalidInput('limit', limit, 'the limit should be a positive integer.') if not limit: limit = pecan.request.cfg.api.default_api_return_limit sorts = list() for s in sort or []: sort_key, __, sort_dir = s.partition(':') if sort_key not in api_model.SUPPORT_SORT_KEYS: raise wsme.exc.InvalidInput( 'sort', s, "the sort parameter should be a pair of sort " "key and sort dir combined with ':', or only" " sort key specified and sort dir will be default " "'%s', the supported sort keys are: %s" % (str(api_model.DEFAULT_DIR), str(api_model.SUPPORT_SORT_KEYS))) if sort_dir and sort_dir not in api_model.SUPPORT_DIRS: raise wsme.exc.InvalidInput( 'sort direction', s, "the sort parameter should be a pair of sort " "key and sort dir combined with ':', or only" " sort key specified and sort dir will be default " "'%s', the supported sort directions are: %s" % (str(api_model.DEFAULT_DIR), str(api_model.SUPPORT_DIRS))) sorts.append((sort_key, sort_dir or api_model.DEFAULT_DIR)) return {'limit': limit, 'marker': marker, 'sort': sorts} ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/api/hooks.py0000664000175000017500000000320300000000000016277 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from pecan import hooks from panko import storage class ConfigHook(hooks.PecanHook): """Attach the configuration object to the request. That allows controllers to get it. """ def __init__(self, conf): super(ConfigHook, self).__init__() self.conf = conf def before(self, state): state.request.cfg = self.conf class DBHook(hooks.PecanHook): def __init__(self, conf): self.connection = storage.get_connection_from_config( conf) def before(self, state): state.request.conn = self.connection class TranslationHook(hooks.PecanHook): def after(self, state): # After a request has been done, we need to see if # ClientSideError has added an error onto the response. # If it has we need to get it info the thread-safe WSGI # environ to be used by the ParsableErrorMiddleware. if hasattr(state.response, 'translatable_error'): state.request.environ['translatable_error'] = ( state.response.translatable_error) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/api/middleware.py0000664000175000017500000001213000000000000017270 0ustar00zuulzuul00000000000000# # Copyright 2013 IBM Corp. # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Middleware to replace the plain text message body of an error response with one formatted so the client can parse it. Based on pecan.middleware.errordocument """ from lxml import etree from oslo_log import log from oslo_serialization import jsonutils import webob from panko import i18n LOG = log.getLogger(__name__) class ParsableErrorMiddleware(object): """Replace error body with something the client can parse.""" @staticmethod def best_match_language(accept_language): """Determines best available locale from the Accept-Language header. :returns: the best language match or None if the 'Accept-Language' header was not available in the request. """ if not accept_language: return None all_languages = i18n.get_available_languages() return accept_language.best_match(all_languages) def __init__(self, app): self.app = app def __call__(self, environ, start_response): # Request for this state, modified by replace_start_response() # and used when an error is being reported. state = {} def replacement_start_response(status, headers, exc_info=None): """Overrides the default response to make errors parsable.""" try: status_code = int(status.split(' ')[0]) state['status_code'] = status_code except (ValueError, TypeError): # pragma: nocover raise Exception(( 'ErrorDocumentMiddleware received an invalid ' 'status %s' % status )) else: if (state['status_code'] // 100) not in (2, 3): # Remove some headers so we can replace them later # when we have the full error message and can # compute the length. headers = [(h, v) for (h, v) in headers if h not in ('Content-Length', 'Content-Type') ] # Save the headers in case we need to modify them. 
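# Illustrative sketch (hypothetical payload): once the complete error text
# is known, this middleware re-emits the body in a form clients can parse,
# e.g. for a JSON request something like
#
#     {"error_message": {"faultcode": "Client",
#                        "faultstring": "Event <message_id> Not Found"}}
#
# and recomputes Content-Length/Content-Type from that new body.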
state['headers'] = headers return start_response(status, headers, exc_info) app_iter = self.app(environ, replacement_start_response) if (state['status_code'] // 100) not in (2, 3): req = webob.Request(environ) error = environ.get('translatable_error') user_locale = self.best_match_language(req.accept_language) if (req.accept.best_match(['application/json', 'application/xml']) == 'application/xml'): content_type = 'application/xml' try: # simple check xml is valid fault = etree.fromstring(b'\n'.join(app_iter)) # Add the translated error to the xml data if error is not None: for fault_string in fault.findall('faultstring'): fault_string.text = i18n.translate(error, user_locale) error_message = etree.tostring(fault) body = b''.join((b'', error_message, b'')) except etree.XMLSyntaxError as err: LOG.error('Error parsing HTTP response: %s', err) error_message = state['status_code'] body = '%s' % error_message body = body.encode('utf-8') else: content_type = 'application/json' app_data = b'\n'.join(app_iter) app_data = app_data.decode('utf-8') try: fault = jsonutils.loads(app_data) if error is not None and 'faultstring' in fault: fault['faultstring'] = i18n.translate(error, user_locale) except ValueError: fault = app_data body = jsonutils.dumps({'error_message': fault}) body = body.encode('utf-8') state['headers'].append(('Content-Length', str(len(body)))) state['headers'].append(('Content-Type', content_type)) body = [body] else: body = app_iter return body ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/api/rbac.py0000664000175000017500000000635600000000000016077 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2014 Hewlett-Packard Company # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Access Control Lists (ACL's) control access the API server.""" from oslo_policy import policy import pecan from panko import policies _ENFORCER = None def init(): global _ENFORCER if not _ENFORCER: _ENFORCER = policy.Enforcer(pecan.request.cfg) _ENFORCER.load_rules() _ENFORCER.register_defaults(policies.list_policies()) def reset(): global _ENFORCER if _ENFORCER: _ENFORCER.clear() _ENFORCER = None def _has_rule(name): return name in _ENFORCER.rules.keys() def enforce(policy_name, request): """Return the user and project the request should be limited to. :param request: HTTP request :param policy_name: the policy name to validate authz against. 
""" init() rule_method = "telemetry:" + policy_name headers = request.headers policy_dict = dict() policy_dict['roles'] = headers.get('X-Roles', "").split(",") policy_dict['user_id'] = (headers.get('X-User-Id')) policy_dict['project_id'] = (headers.get('X-Project-Id')) # maintain backward compat with Juno and previous by allowing the action if # there is no rule defined for it if ((_has_rule('default') or _has_rule(rule_method)) and not _ENFORCER.enforce(rule_method, {}, policy_dict)): pecan.core.abort(status_code=403, detail='RBAC Authorization Failed') # TODO(fabiog): these methods are still used because the scoping part is really # convoluted and difficult to separate out. def get_limited_to(headers): """Return the user and project the request should be limited to. :param headers: HTTP headers dictionary :return: A tuple of (user, project), set to None if there's no limit on one of these. """ init() policy_dict = dict() policy_dict['roles'] = headers.get('X-Roles', "").split(",") policy_dict['user_id'] = (headers.get('X-User-Id')) policy_dict['project_id'] = (headers.get('X-Project-Id')) # maintain backward compat with Juno and previous by using context_is_admin # rule if the segregation rule (added in Kilo) is not defined rule_name = 'segregation' if _has_rule( 'segregation') else 'context_is_admin' if not _ENFORCER.enforce(rule_name, {}, policy_dict): return headers.get('X-User-Id'), headers.get('X-Project-Id') return None, None def get_limited_to_project(headers): """Return the project the request should be limited to. :param headers: HTTP headers dictionary :return: A project, or None if there's no limit on it. """ return get_limited_to(headers)[1] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4701726 panko-10.0.0/panko/cmd/0000775000175000017500000000000000000000000014576 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/cmd/__init__.py0000664000175000017500000000000000000000000016675 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/cmd/storage.py0000664000175000017500000000336500000000000016623 0ustar00zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_log import log from panko import service from panko import storage LOG = log.getLogger(__name__) def dbsync(): conf = service.prepare_service() storage.get_connection_from_config(conf).upgrade() def expirer(): conf = service.prepare_service() if conf.database.event_time_to_live > 0: LOG.debug("Clearing expired event data") conn = storage.get_connection_from_config(conf) max_count = conf.database.events_delete_batch_size try: if max_count > 0: conn.clear_expired_data(conf.database.event_time_to_live, max_count) else: deleted = max_count = 100 while deleted and deleted > 0: deleted = conn.clear_expired_data( conf.database.event_time_to_live, max_count) except TypeError: LOG.warning("Storage driver does not support " "'events_delete_batch_size' config option.") else: LOG.info("Nothing to clean, database event time to live " "is disabled") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4701726 panko-10.0.0/panko/conf/0000775000175000017500000000000000000000000014760 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/conf/__init__.py0000664000175000017500000000000000000000000017057 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/conf/defaults.py0000664000175000017500000000257400000000000017151 0ustar00zuulzuul00000000000000# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_middleware import cors def set_cors_middleware_defaults(): """Update default configuration options for oslo.middleware.""" cors.set_defaults( allow_headers=['X-Auth-Token', 'X-Identity-Status', 'X-Roles', 'X-Service-Catalog', 'X-User-Id', 'X-Tenant-Id', 'X-Openstack-Request-Id'], expose_headers=['X-Auth-Token', 'X-Subject-Token', 'X-Service-Token', 'X-Openstack-Request-Id'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'] ) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4701726 panko-10.0.0/panko/hacking/0000775000175000017500000000000000000000000015437 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/hacking/__init__.py0000664000175000017500000000000000000000000017536 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/hacking/checks.py0000664000175000017500000000325400000000000017255 0ustar00zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Guidelines for writing new hacking checks - Use only for Panko specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range X3xx. Find the current test with the highest allocated number and then pick the next value. - Keep the test method code in the source file ordered based on the C3xx value. - List the new rule in the top level HACKING.rst file """ from hacking import core @core.flake8ext def no_log_warn(logical_line): """Disallow 'LOG.warn(' https://bugs.launchpad.net/tempest/+bug/1508442 C301 """ if logical_line.startswith('LOG.warn('): yield(0, 'C301 Use LOG.warning() rather than LOG.warn()') @core.flake8ext def no_os_popen(logical_line): """Disallow 'os.popen(' Deprecated library function os.popen() Replace it using subprocess https://bugs.launchpad.net/tempest/+bug/1529836 C302 """ if 'os.popen(' in logical_line: yield(0, 'C302 Deprecated library function os.popen(). ' 'Replace it using subprocess module. ') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/i18n.py0000664000175000017500000000204200000000000015162 0ustar00zuulzuul00000000000000# Copyright 2014 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. 
See https://docs.openstack.org/oslo.i18n/latest/user/usage.html """ import oslo_i18n DOMAIN = 'panko' _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary def translate(value, user_locale): return oslo_i18n.translate(value, user_locale) def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4501727 panko-10.0.0/panko/locale/0000775000175000017500000000000000000000000015272 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4501727 panko-10.0.0/panko/locale/en_GB/0000775000175000017500000000000000000000000016244 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4701726 panko-10.0.0/panko/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000020031 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/locale/en_GB/LC_MESSAGES/panko.po0000664000175000017500000000511600000000000021504 0ustar00zuulzuul00000000000000# Andi Chandler , 2017. #zanata msgid "" msgstr "" "Project-Id-Version: panko VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-05-22 10:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2017-07-11 05:07+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "%(entity)s %(id)s Not Found" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "Cannot create table %(table_name)s it already exists. Ignoring error" msgid "Event" msgstr "Event" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "Not Authorised to access %(aspect)s %(id)s" #, python-format msgid "" "Operator %(operator)s is not supported. Only `eq' operator is available for " "field %(field)s" msgstr "" "Operator %(operator)s is not supported. Only `eq' operator is available for " "field %(field)s" #, python-format msgid "" "Operator %(operator)s is not supported. Only `ge' operator is available for " "field %(field)s" msgstr "" "Operator %(operator)s is not supported. Only `ge' operator is available for " "field %(field)s" #, python-format msgid "" "Operator %(operator)s is not supported. Only `le' operator is available for " "field %(field)s" msgstr "" "Operator %(operator)s is not supported. Only `le' operator is available for " "field %(field)s" #, python-format msgid "" "Operator %(operator)s is not supported. The supported operators are: " "%(supported)s" msgstr "" "Operator %(operator)s is not supported. The supported operators are: " "%(supported)s" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "Unable to connect to the database server: %(errmsg)s." 
#, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "Unable to convert the value %(value)s to the expected data type %(type)s." #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "Unexpected exception converting %(value)s to the expected data type %(type)s." ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4501727 panko-10.0.0/panko/locale/ko_KR/0000775000175000017500000000000000000000000016277 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4701726 panko-10.0.0/panko/locale/ko_KR/LC_MESSAGES/0000775000175000017500000000000000000000000020064 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/locale/ko_KR/LC_MESSAGES/panko.po0000664000175000017500000000531400000000000021537 0ustar00zuulzuul00000000000000# JongSoo Ha , 2018. #zanata msgid "" msgstr "" "Project-Id-Version: panko VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2019-04-10 01:12+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2018-11-12 03:43+0000\n" "Last-Translator: JongSoo Ha \n" "Language-Team: Korean (South Korea)\n" "Language: ko_KR\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=1; plural=0\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "%(entity)s %(id)s 발견되지 않음" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr " %(table_name)s 이 이미 존재하므로 테이블 추가 불가능. 에러 무시" msgid "Event" msgstr "이벤트" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "%(aspect)s %(id)s로의 허가되지 않은 접근" #, python-format msgid "" "Operator %(operator)s is not supported. Only `eq' operator is available for " "field %(field)s" msgstr "" "연산자 %(operator)s 는 지원되지 않음. 오직 `eq' 연산자만이 필드 %(field)s에" "서 사용가능" #, python-format msgid "" "Operator %(operator)s is not supported. Only `ge' operator is available for " "field %(field)s" msgstr "" "연산자 %(operator)s 는 지원되지 않음. 오직 `ge' 연산자만이 필드 %(field)s에" "서 사용가능" #, python-format msgid "" "Operator %(operator)s is not supported. Only `le' operator is available for " "field %(field)s" msgstr "" "연산자 %(operator)s 는 지원되지 않음. 오직 `le' 연산자만이 필드 %(field)s에" "서 사용가능" #, python-format msgid "" "Operator %(operator)s is not supported. The supported operators are: " "%(supported)s" msgstr "연산자 %(operator)s 는 지원되지 않음. 지원되는 연산자들: %(supported)s" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "데이터타입 %(type)s 은 지원되지 않음. 지원되는 데이터타입 목록 : " "%(supported)s" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "데이터베이스 서버로 접속 불가 : %(errmsg)s." #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "값 %(value)s 를 희망하는 데이터 타입 %(type)s 으로의 변환 불가" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." 
msgstr "" " %(value)s를 예측 데이터타입 %(type)s 으로 변환도중 예측치 못한 예외 발생" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/opts.py0000664000175000017500000000331200000000000015371 0ustar00zuulzuul00000000000000# Copyright 2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import panko.storage import panko.utils STORAGE_OPTS = [ cfg.IntOpt('max_retries', default=10, deprecated_group='database', help='Maximum number of connection retries during startup. ' 'Set to -1 to specify an infinite retry count.'), cfg.IntOpt('retry_interval', default=10, deprecated_group='database', help='Interval (in seconds) between retries of connection.') ] def list_opts(): return [ ('DEFAULT', [ # FIXME(jd) Move to [api] cfg.StrOpt('api_paste_config', default="api_paste.ini", help="Configuration file for WSGI definition of API."), ]), ('api', [ cfg.IntOpt('default_api_return_limit', min=1, default=100, help='Default maximum number of ' 'items returned by API request.'), ]), ('database', panko.storage.OPTS), ('storage', STORAGE_OPTS), ] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4701726 panko-10.0.0/panko/policies/0000775000175000017500000000000000000000000015642 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/policies/__init__.py0000664000175000017500000000146500000000000017761 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from panko.policies import base from panko.policies import segregation from panko.policies import telemetry def list_policies(): return itertools.chain( base.list_rules(), segregation.list_rules(), telemetry.list_rules() ) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/policies/base.py0000664000175000017500000000212000000000000017121 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy ROLE_ADMIN = 'role:admin' UNPROTECTED = '' # This is a check string that represents a common persona for someone who has # read-only access to the deployment, ultimately a subset of authorization for # system users, or administrators. SYSTEM_READER = 'role:admin and system_scope:all' rules = [ # This can be removed once the deprecated policies in segregation.py have # been removed. policy.RuleDefault( name='context_is_admin', check_str=ROLE_ADMIN ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/policies/segregation.py0000664000175000017500000000277500000000000020536 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import versionutils from oslo_policy import policy from panko.policies import base DEPRECATED_REASON = """ The events API now supports system scope and default roles. """ deprecated_segregation = policy.DeprecatedRule( name='segregation', check_str='rule:context_is_admin' ) rules = [ policy.DocumentedRuleDefault( name='segregation', check_str=base.SYSTEM_READER, scope_types=['system'], description='Return the user and project the request' 'should be limited to', operations=[ { 'path': '/v2/events', 'method': 'GET' }, { 'path': '/v2/events/{message_id}', 'method': 'GET' } ], deprecated_rule=deprecated_segregation, deprecated_reason=DEPRECATED_REASON, deprecated_since=versionutils.deprecated.WALLABY ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/policies/telemetry.py0000664000175000017500000000263100000000000020230 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
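# Illustrative sketch (hypothetical policy.yaml overrides): operators can
# replace the defaults registered by this package, for example restoring
# the deprecated admin-only behaviour or restricting the event listing:
#
#     "segregation": "rule:context_is_admin"
#     "telemetry:events:index": "role:reader"
#
# Out of the box, 'segregation' requires base.SYSTEM_READER
# ('role:admin and system_scope:all') and the telemetry:events:* rules are
# unprotected.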
from oslo_policy import policy from panko.policies import base TELEMETRY_EVENTS = 'telemetry:events:%s' rules = [ policy.DocumentedRuleDefault( name=TELEMETRY_EVENTS % 'index', check_str=base.UNPROTECTED, scope_types=['system', 'project'], description='Return all events matching the query filters.', operations=[ { 'path': '/v2/events', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=TELEMETRY_EVENTS % 'show', check_str=base.UNPROTECTED, scope_types=['system', 'project'], description='Return a single event with the given message id.', operations=[ { 'path': '/v2/events/{message_id}', 'method': 'GET' } ] ) ] def list_rules(): return rules ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/profiler.py0000664000175000017500000000434500000000000016235 0ustar00zuulzuul00000000000000# Copyright 2017 Fujitsu Ltd. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import socket from oslo_log import log from oslo_utils import importutils import webob.dec profiler = importutils.try_import('osprofiler.profiler') profiler_initializer = importutils.try_import('osprofiler.initializer') profiler_web = importutils.try_import('osprofiler.web') LOG = log.getLogger(__name__) class WsgiMiddleware(object): def __init__(self, application, **kwargs): self.application = application @classmethod def factory(cls, global_conf, **local_conf): if profiler_web: return profiler_web.WsgiMiddleware.factory(global_conf) def filter_(app): return cls(app) return filter_ @webob.dec.wsgify def __call__(self, request): return request.get_response(self.application) def setup(conf): if hasattr(conf, 'profiler') and conf.profiler.enabled: profiler_initializer.init_from_conf( conf=conf, context={}, project=conf.project, service=conf.prog, host=socket.gethostbyname(socket.gethostname())) LOG.info('OSprofiler is enabled.') def trace_cls(name, **kwargs): """Wrap the OSprofiler trace_cls. Wrap the OSprofiler trace_cls decorator so that it will not try to patch the class unless OSprofiler is present. :param name: The name of action. For example, wsgi, rpc, db, ... 
:param kwargs: Any other keyword args used by profiler.trace_cls """ def decorator(cls): if profiler: trace_decorator = profiler.trace_cls(name, **kwargs) return trace_decorator(cls) return cls return decorator ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4741726 panko-10.0.0/panko/publisher/0000775000175000017500000000000000000000000016030 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/publisher/__init__.py0000664000175000017500000000000000000000000020127 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/publisher/database.py0000664000175000017500000000256500000000000020156 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from panko import service from panko import storage class DatabasePublisher(object): """Publisher class for recording event data into database. The publisher class which records each event into a database configured in Ceilometer configuration file. To enable this publisher, the following section needs to be present in panko.conf file [database] connection = mysql+pymysql://panko:password@127.0.0.1/panko?charset=utf8 Then, panko:// should be added to Ceilometer's event_pipeline.yaml """ def __init__(self, ceilo_conf, parsed_url): conf = service.prepare_service([], share=True) self.conn = storage.get_connection_from_config(conf) def publish_events(self, events): if not isinstance(events, list): events = [events] self.conn.record_events(events) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/service.py0000664000175000017500000000372200000000000016051 0ustar00zuulzuul00000000000000# Copyright 2012-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
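# Illustrative sketch (hypothetical pipeline snippet): with the
# DatabasePublisher above enabled, Ceilometer's event_pipeline.yaml would
# route events to Panko with a sink along the lines of
#
#     sinks:
#         - name: event_sink
#           publishers:
#               - panko://
#
# while panko.conf holds the [database] connection string shown in the
# class docstring.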
import sys from oslo_config import cfg from oslo_db import options as db_options import oslo_i18n from oslo_log import log from oslo_reports import guru_meditation_report as gmr from oslo_utils import importutils from panko.conf import defaults from panko import opts from panko import profiler from panko import version profiler_opts = importutils.try_import('osprofiler.opts') def prepare_service(argv=None, config_files=None, share=False): conf = cfg.ConfigOpts() for group, options in opts.list_opts(): conf.register_opts(list(options), group=None if group == "DEFAULT" else group) db_options.set_defaults(conf) if profiler_opts: profiler_opts.set_defaults(conf) if not share: defaults.set_cors_middleware_defaults() oslo_i18n.enable_lazy() log.register_options(conf) if argv is None: argv = sys.argv conf(argv[1:], project='panko', validate_default_values=True, version=version.version_info.version_string(), default_config_files=config_files) if not share: log.setup(conf, 'panko') profiler.setup(conf) # NOTE(liusheng): guru cannot run with service under apache daemon, so when # panko-api running with mod_wsgi, the argv is [], we don't start # guru. if argv: gmr.TextGuruMeditation.setup_autorun(version) return conf ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4741726 panko-10.0.0/panko/storage/0000775000175000017500000000000000000000000015477 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/__init__.py0000664000175000017500000001140200000000000017606 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
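# Illustrative sketch of how prepare_service() above is used elsewhere in
# this tree (see panko/cmd/storage.py and panko/publisher/database.py):
#
#     conf = service.prepare_service()                # CLI entry points
#     conn = storage.get_connection_from_config(conf)
#
#     conf = service.prepare_service([], share=True)  # embedded publisher
#
# share=True skips the CORS defaults, log setup and profiler setup that a
# standalone Panko process would otherwise perform.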
"""Storage backend management """ from urllib import parse as urlparse from oslo_config import cfg from oslo_log import log from stevedore import driver import tenacity from panko import utils LOG = log.getLogger(__name__) OPTS = [ cfg.IntOpt('event_time_to_live', default=-1, help=("Number of seconds that events are kept " "in the database for (<= 0 means forever).")), cfg.IntOpt('events_delete_batch_size', default=0, min=0, help=("Number of events to be deleted in one iteration " "from the database for (0 means all).")), cfg.StrOpt('event_connection', secret=True, deprecated_for_removal=True, help='The connection string used to connect ' 'to the event database - rather use ${database.connection}'), cfg.BoolOpt('es_ssl_enabled', default=False, help="Enable HTTPS connection in the Elasticsearch " "connection"), cfg.StrOpt('es_index_name', default='events', help='The name of the index in Elasticsearch') ] class StorageUnknownWriteError(Exception): """Error raised when an unknown error occurs while recording.""" class StorageBadVersion(Exception): """Error raised when the storage backend version is not good enough.""" class StorageBadAggregate(Exception): """Error raised when an aggregate is unacceptable to storage backend.""" code = 400 class InvalidMarker(Exception): """Invalid pagination marker parameters""" def get_connection_from_config(conf): retries = conf.database.max_retries @tenacity.retry( reraise=True, wait=tenacity.wait_fixed(conf.database.retry_interval), stop=(tenacity.stop_after_attempt(retries) if retries >= 0 else tenacity.stop_never) ) def _inner(): url = (conf.database.connection or getattr(conf.database, 'event_connection', None)) return get_connection(url, conf) return _inner() def get_connection(url, conf): """Return an open connection to the database.""" connection_scheme = urlparse.urlparse(url).scheme # SqlAlchemy connections specify may specify a 'dialect' or # 'dialect+driver'. Handle the case where driver is specified. engine_name = connection_scheme.split('+')[0] # NOTE: translation not applied bug #1446983 LOG.debug('looking for %(name)r driver in panko.storage', {'name': engine_name}) mgr = driver.DriverManager('panko.storage', engine_name) return mgr.driver(url, conf) class EventFilter(object): """Properties for building an Event query. :param start_timestamp: UTC start datetime (mandatory) :param end_timestamp: UTC end datetime (mandatory) :param event_type: the name of the event. None for all. :param message_id: the message_id of the event. None for all. :param admin_proj: the project_id of admin role. None if non-admin user. :param traits_filter: the trait filter dicts, all of which are optional. This parameter is a list of dictionaries that specify trait values: .. 
code-block:: python {'key': , 'string': , 'integer': , 'datetime': , 'float': , 'op': } """ def __init__(self, start_timestamp=None, end_timestamp=None, event_type=None, message_id=None, traits_filter=None, admin_proj=None): self.start_timestamp = utils.sanitize_timestamp(start_timestamp) self.end_timestamp = utils.sanitize_timestamp(end_timestamp) self.message_id = message_id self.event_type = event_type self.traits_filter = traits_filter or [] self.admin_proj = admin_proj def __repr__(self): return ("" % (self.start_timestamp, self.end_timestamp, self.event_type, str(self.traits_filter))) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/base.py0000664000175000017500000000762000000000000016770 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base classes for storage engines """ import panko class Model(object): """Base class for storage API models.""" def __init__(self, **kwds): self.fields = list(kwds) for k, v in kwds.items(): setattr(self, k, v) def as_dict(self): d = {} for f in self.fields: v = getattr(self, f) if isinstance(v, Model): v = v.as_dict() elif isinstance(v, list) and v and isinstance(v[0], Model): v = [sub.as_dict() for sub in v] d[f] = v return d def __eq__(self, other): return self.as_dict() == other.as_dict() def __ne__(self, other): return not self.__eq__(other) class Connection(object): """Base class for event storage system connections.""" # A dictionary representing the capabilities of this driver. CAPABILITIES = { 'events': {'query': {'simple': False}}, } STORAGE_CAPABILITIES = { 'storage': {'production_ready': False}, } @staticmethod def __init__(url, conf): pass @staticmethod def upgrade(): """Migrate the database to `version` or the most recent version.""" @staticmethod def clear(): """Clear database.""" @staticmethod def record_events(events): """Write the events to the backend storage system. :param events: a list of model.Event objects. """ raise panko.NotImplementedError('Events not implemented.') @staticmethod def get_events(event_filter, pagination=None): """Return an iterable of model.Event objects.""" @staticmethod def get_event_types(): """Return all event types as an iterable of strings.""" raise panko.NotImplementedError('Events not implemented.') @staticmethod def get_trait_types(event_type): """Return a dictionary containing the name and data type of the trait. Only trait types for the provided event_type are returned. :param event_type: the type of the Event """ raise panko.NotImplementedError('Events not implemented.') @staticmethod def get_traits(event_type, trait_type=None): """Return all trait instances associated with an event_type. If trait_type is specified, only return instances of that trait type. 
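# NOTE: illustrative sketch, not part of the original source. It builds an
# EventFilter using the traits_filter dictionary shape documented above; the
# timestamps, event type and trait values are made up.
def _example_event_filter():
    import datetime

    from panko import storage

    return storage.EventFilter(
        start_timestamp=datetime.datetime(2021, 4, 1),
        end_timestamp=datetime.datetime(2021, 4, 2),
        event_type='compute.instance.create.end',
        traits_filter=[{'key': 'state', 'string': 'active', 'op': 'eq'}])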
:param event_type: the type of the Event to filter by :param trait_type: the name of the Trait to filter by """ raise panko.NotImplementedError('Events not implemented.') @classmethod def get_capabilities(cls): """Return an dictionary with the capabilities of each driver.""" return cls.CAPABILITIES @classmethod def get_storage_capabilities(cls): """Return a dictionary representing the performance capabilities. This is needed to evaluate the performance of each driver. """ return cls.STORAGE_CAPABILITIES @staticmethod def clear_expired_data(ttl, max_count=None): """Clear expired data from the backend storage system. Clearing occurs according to the time-to-live. :param ttl: Number of seconds to keep records for. :param max_count: Number of records to delete. """ raise panko.NotImplementedError('Clearing events not implemented') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4741726 panko-10.0.0/panko/storage/hbase/0000775000175000017500000000000000000000000016561 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/hbase/__init__.py0000664000175000017500000000000000000000000020660 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/hbase/base.py0000664000175000017500000000646200000000000020055 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from urllib import parse as urlparse import warnings import happybase from oslo_log import log from oslo_utils import netutils from panko.storage.hbase import inmemory as hbase_inmemory LOG = log.getLogger(__name__) class Connection(object): """Base connection class for HBase.""" _memory_instance = None def __init__(self, url): warnings.warn("Panko's HBase driver is now deprecated. Please use " "another driver.") """Hbase Connection Initialization.""" opts = self._parse_connection_url(url) if opts['host'] == '__test__': url = os.environ.get('PANKO_TEST_HBASE_URL') if url: # Reparse URL, but from the env variable now opts = self._parse_connection_url(url) self.conn_pool = self._get_connection_pool(opts) else: # This is a in-memory usage for unit tests if Connection._memory_instance is None: LOG.debug('Creating a new in-memory HBase ' 'Connection object') Connection._memory_instance = (hbase_inmemory. MConnectionPool()) self.conn_pool = Connection._memory_instance else: self.conn_pool = self._get_connection_pool(opts) @staticmethod def _get_connection_pool(conf): """Return a connection pool to the database. .. note:: The tests use a subclass to override this and return an in-memory connection pool. 
""" LOG.debug('connecting to HBase on %(host)s:%(port)s', {'host': conf['host'], 'port': conf['port']}) return happybase.ConnectionPool( size=100, host=conf['host'], port=conf['port'], table_prefix=conf['table_prefix'], table_prefix_separator=conf['table_prefix_separator']) @staticmethod def _parse_connection_url(url): """Parse connection parameters from a database url. .. note:: HBase Thrift does not support authentication and there is no database name, so we are not looking for these in the url. """ opts = {} result = netutils.urlsplit(url) opts['table_prefix'] = urlparse.parse_qs( result.query).get('table_prefix', [None])[0] opts['table_prefix_separator'] = urlparse.parse_qs( result.query).get('table_prefix_separator', ['_'])[0] opts['dbtype'] = result.scheme if ':' in result.netloc: opts['host'], port = result.netloc.split(':') else: opts['host'] = result.netloc port = 9090 opts['port'] = port and int(port) or 9090 return opts ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/hbase/inmemory.py0000664000175000017500000002243600000000000021001 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """This is a very crude version of "in-memory HBase", which implements just enough functionality of HappyBase API to support testing of our driver. """ import copy import re from oslo_log import log import panko LOG = log.getLogger(__name__) class MTable(object): """HappyBase.Table mock.""" def __init__(self, name, families): self.name = name self.families = families self._rows_with_ts = {} def row(self, key, columns=None): if key not in self._rows_with_ts: return {} res = copy.copy(sorted( self._rows_with_ts.get(key).items())[-1][1]) if columns: keys = res.keys() for key in keys: if key not in columns: res.pop(key) return res def rows(self, keys): return ((k, self.row(k)) for k in keys) def put(self, key, data, ts=None): # Note: Now we use 'timestamped' but only for one Resource table. # That's why we may put ts='0' in case when ts is None. If it is # needed to use 2 types of put in one table ts=0 cannot be used. if ts is None: ts = "0" if key not in self._rows_with_ts: self._rows_with_ts[key] = {ts: data} else: if ts in self._rows_with_ts[key]: self._rows_with_ts[key][ts].update(data) else: self._rows_with_ts[key].update({ts: data}) def delete(self, key): del self._rows_with_ts[key] def _get_latest_dict(self, row): # The idea here is to return latest versions of columns. # In _rows_with_ts we store {row: {ts_1: {data}, ts_2: {data}}}. # res will contain a list of tuples [(ts_1, {data}), (ts_2, {data})] # sorted by ts, i.e. in this list ts_2 is the most latest. 
# To get result as HBase provides we should iterate in reverse order # and get from "latest" data only key-values that are not in newer data data = {} for i in sorted(self._rows_with_ts[row].items()): data.update(i[1]) return data def scan(self, filter=None, columns=None, row_start=None, row_stop=None, limit=None): columns = columns or [] sorted_keys = sorted(self._rows_with_ts) # copy data between row_start and row_stop into a dict rows = {} for row in sorted_keys: if row_start and row < row_start: continue if row_stop and row > row_stop: break rows[row] = self._get_latest_dict(row) if columns: ret = {} for row, data in rows.items(): for key in data: if key in columns: ret[row] = data rows = ret if filter: # TODO(jdanjou): we should really parse this properly, # but at the moment we are only going to support AND here filters = filter.split('AND') for f in filters: # Extract filter name and its arguments g = re.search(r"(.*)\((.*),?\)", f) fname = g.group(1).strip() fargs = [s.strip().replace('\'', '') for s in g.group(2).split(',')] m = getattr(self, fname) if callable(m): # overwrite rows for filtering to take effect # in case of multiple filters rows = m(fargs, rows) else: raise panko.NotImplementedError( "%s filter is not implemented, " "you may want to add it!") for k in sorted(rows)[:limit]: yield k, rows[k] @staticmethod def SingleColumnValueFilter(args, rows): """This is filter for testing "in-memory HBase". This method is called from scan() when 'SingleColumnValueFilter' is found in the 'filter' argument. """ op = args[2] column = "%s:%s" % (args[0], args[1]) value = args[3] if value.startswith('binary:'): value = value[7:] r = {} for row in rows: data = rows[row] if op == '=': if column in data and data[column] == value: r[row] = data elif op == '<': if column in data and data[column] < value: r[row] = data elif op == '<=': if column in data and data[column] <= value: r[row] = data elif op == '>': if column in data and data[column] > value: r[row] = data elif op == '>=': if column in data and data[column] >= value: r[row] = data elif op == '!=': if column in data and data[column] != value: r[row] = data return r @staticmethod def ColumnPrefixFilter(args, rows): """This is filter for testing "in-memory HBase". This method is called from scan() when 'ColumnPrefixFilter' is found in the 'filter' argument. :param args: a list of filter arguments, contain prefix of column :param rows: a dict of row prefixes for filtering """ value = args[0] column = 'f:' + value r = {} for row, data in rows.items(): column_dict = {} for key in data: if key.startswith(column): column_dict[key] = data[key] r[row] = column_dict return r @staticmethod def RowFilter(args, rows): """This is filter for testing "in-memory HBase". This method is called from scan() when 'RowFilter' is found in the 'filter' argument. :param args: a list of filter arguments, it contains operator and sought string :param rows: a dict of rows which are filtered """ op = args[0] value = args[1] if value.startswith('regexstring:'): value = value[len('regexstring:'):] r = {} for row, data in rows.items(): try: g = re.search(value, row).group() if op == '=': if g == row: r[row] = data else: raise panko.NotImplementedError( "In-memory " "RowFilter doesn't support " "the %s operation yet" % op) except AttributeError: pass return r @staticmethod def QualifierFilter(args, rows): """This is filter for testing "in-memory HBase". 
This method is called from scan() when 'QualifierFilter' is found in the 'filter' argument """ op = args[0] value = args[1] is_regex = False if value.startswith('binaryprefix:'): value = value[len('binaryprefix:'):] if value.startswith('regexstring:'): value = value[len('regexstring:'):] is_regex = True column = 'f:' + value r = {} for row in rows: data = rows[row] r_data = {} for key in data: if ((op == '=' and key.startswith(column)) or (op == '>=' and key >= column) or (op == '<=' and key <= column) or (op == '>' and key > column) or (op == '<' and key < column) or (is_regex and re.search(value, key))): r_data[key] = data[key] else: raise panko.NotImplementedError( "In-memory QualifierFilter " "doesn't support the %s " "operation yet" % op) if r_data: r[row] = r_data return r class MConnectionPool(object): def __init__(self): self.conn = MConnection() def connection(self): return self.conn class MConnection(object): """HappyBase.Connection mock.""" def __init__(self): self.tables = {} def __enter__(self, *args, **kwargs): return self def __exit__(self, exc_type, exc_val, exc_tb): pass @staticmethod def open(): LOG.debug("Opening in-memory HBase connection") def create_table(self, n, families=None): families = families or {} if n in self.tables: return self.tables[n] t = MTable(n, families) self.tables[n] = t return t def delete_table(self, name, use_prefix=True): del self.tables[name] def table(self, name): return self.create_table(name) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/hbase/utils.py0000664000175000017500000002173300000000000020301 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Various HBase helpers""" import copy import datetime import urllib import bson.json_util try: from happybase.hbase.ttypes import AlreadyExists except ImportError: # import happybase to enable Hbase_thrift module import happybase # noqa from Hbase_thrift import AlreadyExists from oslo_log import log from oslo_serialization import jsonutils from panko.i18n import _ LOG = log.getLogger(__name__) EVENT_TRAIT_TYPES = {'none': 0, 'string': 1, 'integer': 2, 'float': 3, 'datetime': 4} OP_SIGN = {'eq': '=', 'lt': '<', 'le': '<=', 'ne': '!=', 'gt': '>', 'ge': '>='} # We need this additional dictionary because we have reverted timestamp in # row-keys for stored metrics OP_SIGN_REV = {'eq': '=', 'lt': '>', 'le': '>=', 'ne': '!=', 'gt': '<', 'ge': '<='} def timestamp(dt, reverse=True): """Timestamp is count of milliseconds since start of epoch. If reverse=True then timestamp will be reversed. Such a technique is used in HBase rowkey design when period queries are required. Because of the fact that rows are sorted lexicographically it's possible to vary whether the 'oldest' entries will be on top of the table or it should be the newest ones (reversed timestamp case). 
:param dt: datetime which is translated to timestamp :param reverse: a boolean parameter for reverse or straight count of timestamp in milliseconds :return: count or reversed count of milliseconds since start of epoch """ epoch = datetime.datetime(1970, 1, 1) td = dt - epoch ts = td.microseconds + td.seconds * 1000000 + td.days * 86400000000 return 0x7fffffffffffffff - ts if reverse else ts def make_events_query_from_filter(event_filter): """Return start and stop row for filtering and a query. Query is based on the selected parameter. :param event_filter: storage.EventFilter object. """ start = "%s" % (timestamp(event_filter.start_timestamp, reverse=False) if event_filter.start_timestamp else "") stop = "%s" % (timestamp(event_filter.end_timestamp, reverse=False) if event_filter.end_timestamp else "") kwargs = {'event_type': event_filter.event_type, 'event_id': event_filter.message_id} res_q = make_query(**kwargs) if event_filter.traits_filter: for trait_filter in event_filter.traits_filter: q_trait = make_query(trait_query=True, **trait_filter) if q_trait: if res_q: res_q += " AND " + q_trait else: res_q = q_trait return res_q, start, stop def make_timestamp_query(func, start=None, start_op=None, end=None, end_op=None, bounds_only=False, **kwargs): """Return a filter start and stop row for filtering and a query. Query is based on the fact that CF-name is 'rts'. :param start: Optional start timestamp :param start_op: Optional start timestamp operator, like gt, ge :param end: Optional end timestamp :param end_op: Optional end timestamp operator, like lt, le :param bounds_only: if True than query will not be returned :param func: a function that provide a format of row :param kwargs: kwargs for :param func """ # We don't need to dump here because get_start_end_rts returns strings rts_start, rts_end = get_start_end_rts(start, end) start_row, end_row = func(rts_start, rts_end, **kwargs) if bounds_only: return start_row, end_row q = [] start_op = start_op or 'ge' end_op = end_op or 'lt' if rts_start: q.append("SingleColumnValueFilter ('f', 'rts', %s, 'binary:%s')" % (OP_SIGN_REV[start_op], rts_start)) if rts_end: q.append("SingleColumnValueFilter ('f', 'rts', %s, 'binary:%s')" % (OP_SIGN_REV[end_op], rts_end)) res_q = None if len(q): res_q = " AND ".join(q) return start_row, end_row, res_q def get_start_end_rts(start, end): rts_start = str(timestamp(start)) if start else "" rts_end = str(timestamp(end)) if end else "" return rts_start, rts_end def make_query(trait_query=None, **kwargs): """Return a filter query string based on the selected parameters. :param trait_query: optional boolean, for trait_query from kwargs :param kwargs: key-value pairs to filter on. Key should be a real column name in db """ q = [] res_q = None # Query for traits differs from others. It is constructed with # SingleColumnValueFilter with the possibility to choose comparison # operator if trait_query: trait_name = kwargs.pop('key') op = kwargs.pop('op', 'eq') for k, v in kwargs.items(): if v is not None: res_q = ("SingleColumnValueFilter " "('f', '%s', %s, 'binary:%s', true, true)" % (prepare_key(trait_name, EVENT_TRAIT_TYPES[k]), OP_SIGN[op], dump(v))) return res_q # Note: we use extended constructor for SingleColumnValueFilter here. # It is explicitly specified that entry should not be returned if CF is not # found in table. 
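# NOTE: illustrative sketch, not part of the original source. It demonstrates
# the reversed-timestamp behaviour of timestamp() above, which is what keeps
# newer rows sorted first in HBase row keys; the datetimes are made up.
def _example_reversed_timestamp():
    import datetime

    from panko.storage.hbase import utils as hbase_utils

    older = datetime.datetime(2021, 4, 1)
    newer = datetime.datetime(2021, 4, 2)
    # With reverse=True (the default) a later datetime yields a smaller
    # row-key value, so lexicographic scans return the newest entries first.
    assert hbase_utils.timestamp(newer) < hbase_utils.timestamp(older)
    # With reverse=False the natural ordering is kept.
    assert (hbase_utils.timestamp(newer, reverse=False) >
            hbase_utils.timestamp(older, reverse=False))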
for key, value in sorted(kwargs.items()): if value is not None: if key == 'trait_type': q.append("ColumnPrefixFilter('%s')" % value) elif key == 'event_id': q.append(r"RowFilter ( = , 'regexstring:\d*:%s')" % value) else: q.append("SingleColumnValueFilter " "('f', '%s', =, 'binary:%s', true, true)" % (quote(key), dump(value))) res_q = None if len(q): res_q = " AND ".join(q) return res_q def prepare_key(*args): """Prepares names for rows and columns with correct separator. :param args: strings or numbers that we want our key construct of :return: key with quoted args that are separated with character ":" """ key_quote = [] for key in args: if isinstance(key, int): key = str(key) key_quote.append(quote(key)) return ":".join(key_quote) def deserialize_entry(entry): """Return a list of flatten_result Flatten_result contains a dict of simple structures such as 'resource_id':1 :param entry: entry from HBase, without row name and timestamp """ flatten_result = {} for k, v in entry.items(): if ':' in k[2:]: key = tuple([unquote(i) for i in k[2:].split(':')]) else: key = unquote(k[2:]) flatten_result[key] = load(v) return flatten_result def serialize_entry(data=None, **kwargs): """Return a dict that is ready to be stored to HBase :param data: dict to be serialized :param kwargs: additional args """ data = data or {} entry_dict = copy.copy(data) entry_dict.update(**kwargs) return {'f:' + quote(k, ':'): dump(v) for k, v in entry_dict.items()} def dump(data): return jsonutils.dumps(data, default=bson.json_util.default) def load(data): return jsonutils.loads(data, object_hook=object_hook) # We don't want to have tzinfo in decoded json.This object_hook is # overwritten json_util.object_hook for $date def object_hook(dct): if "$date" in dct: dt = bson.json_util.object_hook(dct) return dt.replace(tzinfo=None) return bson.json_util.object_hook(dct) def create_tables(conn, tables, column_families): for table in tables: try: conn.create_table(table, column_families) except AlreadyExists: if conn.table_prefix: table = ("%(table_prefix)s" "%(separator)s" "%(table_name)s" % dict(table_prefix=conn.table_prefix, separator=conn.table_prefix_separator, table_name=table)) LOG.warning(_("Cannot create table %(table_name)s " "it already exists. Ignoring error") % {'table_name': table}) def quote(s, *args): """Return quoted string even if it is unicode one. :param s: string that should be quoted :param args: any symbol we want to stay unquoted """ s_en = s.encode('utf8') return urllib.parse.quote(s_en, *args) def unquote(s): """Return unquoted and decoded string. :param s: string that should be unquoted """ s_de = urllib.parse.unquote(s) return s_de.decode('utf8') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/impl_elasticsearch.py0000664000175000017500000003040000000000000021701 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
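# NOTE: illustrative sketch, not part of the original source. It shows how
# the HBase key/entry helpers above are combined when a row is written; the
# trait name and event type are made up.
def _example_hbase_row_helpers():
    from panko.storage.hbase import utils as hbase_utils

    # prepare_key() URL-quotes each part and joins them with ':', which is
    # how trait columns and event row keys are built.
    column = hbase_utils.prepare_key('instance_id', 1)
    # column == 'instance_id:1'
    record = hbase_utils.serialize_entry({'event_type': 'image.update'})
    # record == {'f:event_type': '"image.update"'} -- values are JSON-dumped
    # and keys get the 'f' column-family prefix.
    return column, record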
import datetime import operator import elasticsearch as es from elasticsearch import helpers from oslo_log import log from oslo_utils import netutils from oslo_utils import timeutils from panko import storage from panko.storage import base from panko.storage import models from panko import utils LOG = log.getLogger(__name__) AVAILABLE_CAPABILITIES = { 'events': {'query': {'simple': True}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } class Connection(base.Connection): """Put the event data into an ElasticSearch db. Events in ElasticSearch are indexed by day and stored by event_type. An example document:: {"_index":"events_2014-10-21", "_type":"event_type0", "_id":"dc90e464-65ab-4a5d-bf66-ecb956b5d779", "_score":1.0, "_source":{"timestamp": "2014-10-21T20:02:09.274797" "traits": {"id4_0": "2014-10-21T20:02:09.274797", "id3_0": 0.7510790937279408, "id2_0": 5, "id1_0": "18c97ba1-3b74-441a-b948-a702a30cbce2"} } } """ CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = utils.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) # NOTE(gordc): mainly for testing, data is not searchable after write, # it is only searchable after periodic refreshes. _refresh_on_write = False def __init__(self, url, conf): url_split = netutils.urlsplit(url) use_ssl = conf.database.es_ssl_enabled self.index_name = conf.database.es_index_name self.conn = es.Elasticsearch(hosts=url_split.netloc + url_split.path, use_ssl=use_ssl) def upgrade(self): iclient = es.client.IndicesClient(self.conn) ts_template = { 'template': '*', 'mappings': {'_default_': {'properties': {'traits': {'type': 'nested'}}}}} iclient.put_template(name='enable_timestamp', body=ts_template) def record_events(self, events): def _build_bulk_index(event_list): for ev in event_list: traits = {t.name: t.value for t in ev.traits} yield {'_op_type': 'create', '_index': '%s_%s' % (self.index_name, ev.generated.date().isoformat()), '_type': ev.event_type, '_id': ev.message_id, '_source': {'timestamp': ev.generated.isoformat(), 'traits': traits, 'raw': ev.raw}} error = None for ok, result in helpers.streaming_bulk( self.conn, _build_bulk_index(events)): if not ok: __, result = result.popitem() if result['status'] == 409: LOG.info('Duplicate event detected, skipping it: %s', result) else: LOG.exception('Failed to record event: %s', result) error = storage.StorageUnknownWriteError(result) if self._refresh_on_write: self.conn.indices.refresh(index='%s_*' % self.index_name) while self.conn.cluster.pending_tasks(local=True)['tasks']: pass if error: raise error def _make_dsl_from_filter(self, indices, ev_filter): q_args = {} filters = [] if ev_filter.start_timestamp: filters.append({'range': {'timestamp': {'ge': ev_filter.start_timestamp.isoformat()}}}) while indices[0] < ( '%s_%s' % (self.index_name, ev_filter.start_timestamp.date().isoformat())): del indices[0] if ev_filter.end_timestamp: filters.append({'range': {'timestamp': {'le': ev_filter.end_timestamp.isoformat()}}}) while indices[-1] > ( '%s_%s' % (self.index_name, ev_filter.end_timestamp.date().isoformat())): del indices[-1] q_args['index'] = indices if ev_filter.event_type: q_args['doc_type'] = ev_filter.event_type if ev_filter.message_id: filters.append({'term': {'_id': ev_filter.message_id}}) if ev_filter.traits_filter or ev_filter.admin_proj: or_cond = [] trait_filters = [] for t_filter in ev_filter.traits_filter or []: value = None for val_type in ['integer', 'string', 
'float', 'datetime']: if t_filter.get(val_type): value = t_filter.get(val_type) if isinstance(value, str): value = value.lower() elif isinstance(value, datetime.datetime): value = value.isoformat() break if t_filter.get('op') in ['gt', 'ge', 'lt', 'le']: op = (t_filter.get('op').replace('ge', 'gte') .replace('le', 'lte')) trait_filters.append( {'range': { "traits.%s" % t_filter['key']: {op: value}}}) else: tf = {"query": {"query_string": { "query": "traits.%s: \"%s\"" % (t_filter['key'], value) }}} if t_filter.get('op') == 'ne': tf = {"not": tf} trait_filters.append(tf) if ev_filter.admin_proj: or_cond = [{'missing': {'field': 'traits.project_id'}}, {'term': { 'traits.project_id': ev_filter.admin_proj}}] filters.append( {'nested': {'path': 'traits', 'query': {'filtered': { 'filter': {'bool': {'must': trait_filters, 'should': or_cond}}}}}}) q_args['body'] = {'query': {'filtered': {'filter': {'bool': {'must': filters}}}}} return q_args def get_events(self, event_filter, pagination=None): limit = None if pagination: if pagination.get('sort'): LOG.warning('Driver does not support sort functionality') limit = pagination.get('limit') if limit == 0: return iclient = es.client.IndicesClient(self.conn) indices = iclient.get_mapping('%s_*' % self.index_name).keys() if indices: filter_args = self._make_dsl_from_filter(indices, event_filter) if limit is not None: filter_args['size'] = limit results = self.conn.search(fields=['_id', 'timestamp', '_type', '_source'], sort='timestamp:asc', **filter_args) trait_mappings = {} for record in results['hits']['hits']: trait_list = [] if not record['_type'] in trait_mappings: trait_mappings[record['_type']] = list( self.get_trait_types(record['_type'])) for key in record['_source']['traits'].keys(): value = record['_source']['traits'][key] for t_map in trait_mappings[record['_type']]: if t_map['name'] == key: dtype = t_map['data_type'] break else: dtype = models.Trait.TEXT_TYPE trait_list.append(models.Trait( name=key, dtype=dtype, value=models.Trait.convert_value(dtype, value))) gen_ts = timeutils.normalize_time(timeutils.parse_isotime( record['_source']['timestamp'])) yield models.Event(message_id=record['_id'], event_type=record['_type'], generated=gen_ts, traits=sorted( trait_list, key=operator.attrgetter('dtype')), raw=record['_source']['raw']) def get_event_types(self): iclient = es.client.IndicesClient(self.conn) es_mappings = iclient.get_mapping('%s_*' % self.index_name) seen_types = set() for index in es_mappings.keys(): for ev_type in es_mappings[index]['mappings'].keys(): seen_types.add(ev_type) # TODO(gordc): tests assume sorted ordering but backends are not # explicitly ordered. 
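# NOTE: illustrative sketch, not part of the original source. It mirrors the
# bulk action that record_events() above emits for a single event, with one
# index per day and one document type per event type; the index prefix,
# message id and trait values are made up.
def _example_es_bulk_action():
    import datetime

    generated = datetime.datetime(2021, 4, 14, 10, 0, 0)
    action = {
        '_op_type': 'create',
        '_index': 'events_%s' % generated.date().isoformat(),
        '_type': 'compute.instance.create.end',
        '_id': 'dc90e464-65ab-4a5d-bf66-ecb956b5d779',
        '_source': {'timestamp': generated.isoformat(),
                    'traits': {'instance_id': 'a-made-up-uuid'},
                    'raw': {}},
    }
    return action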
# NOTE: _default_ is a type that appears in all mappings but is not # real 'type' seen_types.discard('_default_') return sorted(list(seen_types)) @staticmethod def _remap_es_types(d_type): if d_type == 'string': d_type = 'text' elif d_type == 'long': d_type = 'int' elif d_type == 'double': d_type = 'float' elif d_type == 'date' or d_type == 'date_time': d_type = 'datetime' return d_type def get_trait_types(self, event_type): iclient = es.client.IndicesClient(self.conn) es_mappings = iclient.get_mapping('%s_*' % self.index_name) seen_types = [] for index in es_mappings.keys(): # if event_type exists in index and has traits if (es_mappings[index]['mappings'].get(event_type) and es_mappings[index]['mappings'][event_type]['properties'] ['traits'].get('properties')): for t_type in (es_mappings[index]['mappings'][event_type] ['properties']['traits']['properties'].keys()): d_type = (es_mappings[index]['mappings'][event_type] ['properties']['traits']['properties'] [t_type]['type']) d_type = models.Trait.get_type_by_name( self._remap_es_types(d_type)) if (t_type, d_type) not in seen_types: yield {'name': t_type, 'data_type': d_type} seen_types.append((t_type, d_type)) def get_traits(self, event_type, trait_type=None): t_types = dict((res['name'], res['data_type']) for res in self.get_trait_types(event_type)) if not t_types or (trait_type and trait_type not in t_types.keys()): return result = self.conn.search('%s_*' % self.index_name, event_type) for ev in result['hits']['hits']: if trait_type and ev['_source']['traits'].get(trait_type): yield models.Trait( name=trait_type, dtype=t_types[trait_type], value=models.Trait.convert_value( t_types[trait_type], ev['_source']['traits'][trait_type])) else: for trait in ev['_source']['traits'].keys(): yield models.Trait( name=trait, dtype=t_types[trait], value=models.Trait.convert_value( t_types[trait], ev['_source']['traits'][trait])) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/impl_hbase.py0000664000175000017500000002132500000000000020157 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import operator from oslo_log import log from panko.storage import base from panko.storage.hbase import base as hbase_base from panko.storage.hbase import utils as hbase_utils from panko.storage import models from panko import utils LOG = log.getLogger(__name__) AVAILABLE_CAPABILITIES = { 'events': {'query': {'simple': True}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } class Connection(hbase_base.Connection, base.Connection): """Put the event data into a HBase database Collections: - events: - row_key: timestamp of event's generation + uuid of event in format: "%s:%s" % (ts, Event.message_id) - Column Families: f: contains the following qualifiers: - event_type: description of event's type - timestamp: time stamp of event generation - all traits for this event in format: .. 
code-block:: python "%s:%s" % (trait_name, trait_type) """ CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = utils.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) _memory_instance = None EVENT_TABLE = "event" def upgrade(self): tables = [self.EVENT_TABLE] column_families = {'f': dict(max_versions=1)} with self.conn_pool.connection() as conn: hbase_utils.create_tables(conn, tables, column_families) def clear(self): LOG.debug('Dropping HBase schema...') with self.conn_pool.connection() as conn: for table in [self.EVENT_TABLE]: try: conn.disable_table(table) except Exception: LOG.debug('Cannot disable table but ignoring error') try: conn.delete_table(table) except Exception: LOG.debug('Cannot delete table but ignoring error') def record_events(self, event_models): """Write the events to Hbase. :param event_models: a list of models.Event objects. """ error = None with self.conn_pool.connection() as conn: events_table = conn.table(self.EVENT_TABLE) for event_model in event_models: # Row key consists of timestamp and message_id from # models.Event or purposes of storage event sorted by # timestamp in the database. ts = event_model.generated row = hbase_utils.prepare_key( hbase_utils.timestamp(ts, reverse=False), event_model.message_id) event_type = event_model.event_type traits = {} if event_model.traits: for trait in event_model.traits: key = hbase_utils.prepare_key(trait.name, trait.dtype) traits[key] = trait.value record = hbase_utils.serialize_entry(traits, event_type=event_type, timestamp=ts, raw=event_model.raw) try: events_table.put(row, record) except Exception as ex: LOG.exception("Failed to record event: %s", ex) error = ex if error: raise error def get_events(self, event_filter, pagination=None): """Return an iter of models.Event objects. :param event_filter: storage.EventFilter object, consists of filters for events that are stored in database. :param pagination: Pagination parameters. """ limit = None if pagination: if pagination.get('sort'): LOG.warning('Driver does not support sort functionality') limit = pagination.get('limit') if limit == 0: return q, start, stop = hbase_utils.make_events_query_from_filter( event_filter) with self.conn_pool.connection() as conn: events_table = conn.table(self.EVENT_TABLE) gen = events_table.scan(filter=q, row_start=start, row_stop=stop, limit=limit) for event_id, data in gen: traits = [] events_dict = hbase_utils.deserialize_entry(data)[0] for key, value in events_dict.items(): if isinstance(key, tuple): trait_name, trait_dtype = key traits.append(models.Trait(name=trait_name, dtype=int(trait_dtype), value=value)) ts, mess = event_id.split(':') yield models.Event( message_id=hbase_utils.unquote(mess), event_type=events_dict['event_type'], generated=events_dict['timestamp'], traits=sorted(traits, key=operator.attrgetter('dtype')), raw=events_dict['raw'] ) def get_event_types(self): """Return all event types as an iterable of strings.""" with self.conn_pool.connection() as conn: events_table = conn.table(self.EVENT_TABLE) gen = events_table.scan() event_types = set() for event_id, data in gen: events_dict = hbase_utils.deserialize_entry(data)[0] for key, value in events_dict.items(): if not isinstance(key, tuple) and key.startswith('event_type'): if value not in event_types: event_types.add(value) yield value def get_trait_types(self, event_type): """Return a dictionary containing the name and data type of the trait. 
Only trait types for the provided event_type are returned. :param event_type: the type of the Event """ q = hbase_utils.make_query(event_type=event_type) trait_names = set() with self.conn_pool.connection() as conn: events_table = conn.table(self.EVENT_TABLE) gen = events_table.scan(filter=q) for event_id, data in gen: events_dict = hbase_utils.deserialize_entry(data)[0] for key, value in events_dict.items(): if isinstance(key, tuple): trait_name, trait_type = key if trait_name not in trait_names: # Here we check that our method return only unique # trait types, for ex. if it is found the same trait # types in different events with equal event_type, # method will return only one trait type. It is # proposed that certain trait name could have only one # trait type. trait_names.add(trait_name) data_type = models.Trait.type_names[int(trait_type)] yield {'name': trait_name, 'data_type': data_type} def get_traits(self, event_type, trait_type=None): """Return all trait instances associated with an event_type. If trait_type is specified, only return instances of that trait type. :param event_type: the type of the Event to filter by :param trait_type: the name of the Trait to filter by """ q = hbase_utils.make_query(event_type=event_type, trait_type=trait_type) with self.conn_pool.connection() as conn: events_table = conn.table(self.EVENT_TABLE) gen = events_table.scan(filter=q) for event_id, data in gen: events_dict = hbase_utils.deserialize_entry(data)[0] for key, value in events_dict.items(): if isinstance(key, tuple): trait_name, trait_type = key yield models.Trait(name=trait_name, dtype=int(trait_type), value=value) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/impl_log.py0000664000175000017500000000207000000000000017652 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from panko.storage import base LOG = log.getLogger(__name__) class Connection(base.Connection): """Log event data.""" @staticmethod def clear_expired_data(ttl, max_count): """Clear expired data from the backend storage system. Clearing occurs according to the time-to-live. :param ttl: Number of seconds to keep records for. :param max_count: Number of records to delete. """ LOG.info("Dropping %d events data with TTL %d", max_count, ttl) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/impl_mongodb.py0000664000175000017500000001027000000000000020517 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """MongoDB storage backend""" from oslo_log import log import pymongo from panko import storage from panko.storage.mongo import utils as pymongo_utils from panko.storage import pymongo_base LOG = log.getLogger(__name__) class Connection(pymongo_base.Connection): """Put the event data into a MongoDB database.""" CONNECTION_POOL = pymongo_utils.ConnectionPool() def __init__(self, url, conf): # NOTE(jd) Use our own connection pooling on top of the Pymongo one. # We need that otherwise we overflow the MongoDB instance with new # connection since we instantiate a Pymongo client each time someone # requires a new storage connection. self.conn = self.CONNECTION_POOL.connect( url, conf.database.max_retries, conf.database.retry_interval) # Require MongoDB 2.4 to use $setOnInsert if self.conn.server_info()['versionArray'] < [2, 4]: raise storage.StorageBadVersion("Need at least MongoDB 2.4") connection_options = pymongo.uri_parser.parse_uri(url) self.db = getattr(self.conn, connection_options['database']) if connection_options.get('username'): self.db.authenticate(connection_options['username'], connection_options['password']) # NOTE(jd) Upgrading is just about creating index, so let's do this # on connection to be sure at least the TTL is correctly updated if # needed. self.upgrade() @staticmethod def update_ttl(ttl, ttl_index_name, index_field, coll): """Update or create time_to_live indexes. :param ttl: time to live in seconds. :param ttl_index_name: name of the index we want to update or create. :param index_field: field with the index that we need to update. :param coll: collection which indexes need to be updated. """ indexes = coll.index_information() if ttl <= 0: if ttl_index_name in indexes: coll.drop_index(ttl_index_name) return if ttl_index_name in indexes: return coll.database.command( 'collMod', coll.name, index={'keyPattern': {index_field: pymongo.ASCENDING}, 'expireAfterSeconds': ttl}) coll.create_index([(index_field, pymongo.ASCENDING)], expireAfterSeconds=ttl, name=ttl_index_name) def upgrade(self): # create collection if not present if 'event' not in self.db.conn.collection_names(): self.db.conn.create_collection('event') # Establish indexes # NOTE(idegtiarov): This indexes cover get_events, get_event_types, and # get_trait_types requests based on event_type and timestamp fields. self.db.event.create_index( [('event_type', pymongo.ASCENDING), ('timestamp', pymongo.ASCENDING)], name='event_type_idx' ) def clear(self): self.conn.drop_database(self.db.name) # Connection will be reopened automatically if needed self.conn.close() def clear_expired_data(self, ttl, max_count=None): """Clear expired data from the backend storage system. Clearing occurs according to the time-to-live. :param ttl: Number of seconds to keep records for. :param max_count: Number of records to delete (not used for MongoDB). """ self.update_ttl(ttl, 'event_ttl', 'timestamp', self.db.event) LOG.info("Clearing expired event data is based on native " "MongoDB time to live feature and going in background.") ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/impl_sqlalchemy.py0000664000175000017500000005052500000000000021243 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
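# NOTE: illustrative sketch, not part of the original source. It shows how
# the MongoDB driver's TTL handling above is exercised; it assumes conf comes
# from prepare_service() and that a MongoDB instance is reachable at the
# made-up URL.
def _example_mongo_event_ttl(conf):
    from panko import storage

    conn = storage.get_connection('mongodb://localhost:27017/panko', conf)
    # A positive TTL creates (or keeps) the 'event_ttl' index on the
    # 'timestamp' field; a TTL <= 0 drops it so events are kept forever.
    conn.clear_expired_data(ttl=86400, max_count=None)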
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """SQLAlchemy storage backend.""" import collections import datetime from oslo_db import exception as dbexc from oslo_db.sqlalchemy import session as db_session from oslo_db.sqlalchemy import utils as oslo_sql_utils from oslo_log import log from oslo_utils import importutils from oslo_utils import timeutils import sqlalchemy as sa from sqlalchemy.engine import url as sqlalchemy_url from sqlalchemy.orm import aliased from panko import storage from panko.storage import base from panko.storage import models as api_models from panko.storage.sqlalchemy import models from panko import utils LOG = log.getLogger(__name__) osprofiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy') AVAILABLE_CAPABILITIES = { 'events': {'query': {'simple': True}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } TRAIT_MAPLIST = [(api_models.Trait.NONE_TYPE, models.TraitText), (api_models.Trait.TEXT_TYPE, models.TraitText), (api_models.Trait.INT_TYPE, models.TraitInt), (api_models.Trait.FLOAT_TYPE, models.TraitFloat), (api_models.Trait.DATETIME_TYPE, models.TraitDatetime)] TRAIT_ID_TO_MODEL = dict((x, y) for x, y in TRAIT_MAPLIST) TRAIT_MODEL_TO_ID = dict((y, x) for x, y in TRAIT_MAPLIST) trait_models_dict = {'string': models.TraitText, 'integer': models.TraitInt, 'datetime': models.TraitDatetime, 'float': models.TraitFloat} def _get_model_and_conditions(trait_type, key, value, op='eq'): trait_model = aliased(trait_models_dict[trait_type]) op_dict = {'eq': (trait_model.value == value), 'lt': (trait_model.value < value), 'le': (trait_model.value <= value), 'gt': (trait_model.value > value), 'ge': (trait_model.value >= value), 'ne': (trait_model.value != value)} conditions = [trait_model.key == key, op_dict[op]] return (trait_model, conditions) class Connection(base.Connection): """Put the event data into a SQLAlchemy database. 
Tables:: - EventType - event definition - { id: event type id desc: description of event } - Event - event data - { id: event id message_id: message id generated = timestamp of event event_type_id = event type -> eventtype.id } - TraitInt - int trait value - { event_id: event -> event.id key: trait name value: integer value } - TraitDatetime - datetime trait value - { event_id: event -> event.id key: trait name value: datetime value } - TraitText - text trait value - { event_id: event -> event.id key: trait name value: text value } - TraitFloat - float trait value - { event_id: event -> event.id key: trait name value: float value } """ CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = utils.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) def __init__(self, url, conf): # Set max_retries to 0, since oslo.db in certain cases may attempt # to retry making the db connection retried max_retries ^ 2 times # in failure case and db reconnection has already been implemented # in storage.__init__.get_connection_from_config function options = dict(conf.database.items()) options['max_retries'] = 0 # oslo.db doesn't support options defined by Panko for opt in storage.OPTS: options.pop(opt.name, None) self._engine_facade = db_session.EngineFacade(self.dress_url(url), **options) if osprofiler_sqlalchemy: osprofiler_sqlalchemy.add_tracing(sa, self._engine_facade.get_engine(), 'db') @staticmethod def dress_url(url): # If no explicit driver has been set, we default to pymysql if url.startswith("mysql://"): url = sqlalchemy_url.make_url(url) url.drivername = "mysql+pymysql" return str(url) return url def upgrade(self): engine = self._engine_facade.get_engine() models.Base.metadata.create_all(engine) def clear(self): engine = self._engine_facade.get_engine() for table in reversed(models.Base.metadata.sorted_tables): engine.execute(table.delete()) engine.dispose() def _get_or_create_event_type(self, event_type, session): """Check if an event type with the supplied name is already exists. If not, we create it and return the record. This may result in a flush. """ try: with session.begin(nested=True): et = session.query(models.EventType).filter( models.EventType.desc == event_type).first() if not et: et = models.EventType(event_type) session.add(et) except dbexc.DBDuplicateEntry: et = self._get_or_create_event_type(event_type, session) return et def record_events(self, event_models): """Write the events to SQL database via sqlalchemy. :param event_models: a list of model.Event objects. 
""" session = self._engine_facade.get_session() error = None for event_model in event_models: event = None try: with session.begin(): event_type = self._get_or_create_event_type( event_model.event_type, session=session) event = models.Event(event_model.message_id, event_type, event_model.generated, event_model.raw) session.add(event) session.flush() if event_model.traits: trait_map = {} for trait in event_model.traits: if trait_map.get(trait.dtype) is None: trait_map[trait.dtype] = [] trait_map[trait.dtype].append( {'event_id': event.id, 'key': trait.name, 'value': trait.value}) for dtype in trait_map.keys(): model = TRAIT_ID_TO_MODEL[dtype] session.execute(model.__table__.insert(), trait_map[dtype]) except dbexc.DBDuplicateEntry as e: LOG.debug("Duplicate event detected, skipping it: %s", e) except KeyError as e: LOG.exception('Failed to record event: %s', e) except Exception as e: LOG.exception('Failed to record event: %s', e) error = e if error: raise error def _get_pagination_query(self, query, pagination, api_model, model): limit = pagination.get('limit') marker = None if pagination.get('marker'): marker_filter = storage.EventFilter( message_id=pagination.get('marker')) markers = list(self.get_events(marker_filter)) if markers: marker = markers[0] else: raise storage.InvalidMarker( 'Marker %s not found.' % pagination['marker']) if not pagination.get('sort'): pagination['sort'] = api_model.DEFAULT_SORT sort_keys = [s[0] for s in pagination['sort']] sort_dirs = [s[1] for s in pagination['sort']] return oslo_sql_utils.paginate_query( query, model, limit, sort_keys, sort_dirs=sort_dirs, marker=marker) def get_events(self, event_filter, pagination=None): """Return an iterable of model.Event objects. :param event_filter: EventFilter instance :param pagination: Pagination parameters. 
""" pagination = pagination or {} session = self._engine_facade.get_session() with session.begin(): # Build up the join conditions event_join_conditions = [models.EventType.id == models.Event.event_type_id] if event_filter.event_type: event_join_conditions.append(models.EventType.desc == event_filter.event_type) # Build up the where conditions event_filter_conditions = [] if event_filter.message_id: event_filter_conditions.append( models.Event.message_id == event_filter.message_id) if event_filter.start_timestamp: event_filter_conditions.append( models.Event.generated >= event_filter.start_timestamp) if event_filter.end_timestamp: event_filter_conditions.append( models.Event.generated <= event_filter.end_timestamp) trait_subq = None # Build trait filter if event_filter.traits_filter: filters = list(event_filter.traits_filter) trait_filter = filters.pop() key = trait_filter.pop('key') op = trait_filter.pop('op', 'eq') trait_type, value = list(trait_filter.items())[0] trait_model, conditions = _get_model_and_conditions( trait_type, key, value, op) trait_subq = (session .query(trait_model.event_id.label('ev_id')) .filter(*conditions)) first_model = trait_model for label_num, trait_filter in enumerate(filters): key = trait_filter.pop('key') op = trait_filter.pop('op', 'eq') trait_type, value = list(trait_filter.items())[0] trait_model, conditions = _get_model_and_conditions( trait_type, key, value, op) trait_subq = ( trait_subq .add_columns( trait_model.event_id.label('l%d' % label_num)) .filter( first_model.event_id == trait_model.event_id, *conditions)) trait_subq = trait_subq.subquery() query = (session.query(models.Event.id) .join(models.EventType, sa.and_(*event_join_conditions))) if trait_subq is not None: query = query.join(trait_subq, trait_subq.c.ev_id == models.Event.id) if event_filter.admin_proj: no_proj_q = session.query(models.TraitText.event_id).filter( models.TraitText.key == 'project_id') admin_q = (session.query(models.TraitText.event_id).filter( ~sa.exists().where(models.TraitText.event_id == no_proj_q.subquery().c.event_id)).union( session.query(models.TraitText.event_id).filter(sa.and_( models.TraitText.key == 'project_id', models.TraitText.value == event_filter.admin_proj, models.Event.id == models.TraitText.event_id)))) query = query.filter(sa.exists().where( models.Event.id == admin_q.subquery().c.trait_text_event_id)) if event_filter_conditions: query = query.filter(sa.and_(*event_filter_conditions)) query = self._get_pagination_query( query, pagination, api_models.Event, models.Event) event_list = collections.OrderedDict() # get a list of all events that match filters for (id_, generated, message_id, desc, raw) in query.add_columns( models.Event.generated, models.Event.message_id, models.EventType.desc, models.Event.raw).all(): event_list[id_] = api_models.Event( message_id, desc, generated, [], raw) # Query all traits related to events. # NOTE (gordc): cast is done because pgsql defaults to TEXT when # handling unknown values such as null. 
trait_q = ( session.query( models.TraitDatetime.event_id, models.TraitDatetime.key, models.TraitDatetime.value, sa.cast(sa.null(), sa.Integer), sa.cast(sa.null(), sa.Float(53)), sa.cast(sa.null(), sa.String(255))) .filter(sa.exists().where( models.TraitDatetime.event_id == query.subquery().c.id)) ).union_all( session.query( models.TraitInt.event_id, models.TraitInt.key, sa.null(), models.TraitInt.value, sa.null(), sa.null()) .filter(sa.exists().where( models.TraitInt.event_id == query.subquery().c.id)), session.query( models.TraitFloat.event_id, models.TraitFloat.key, sa.null(), sa.null(), models.TraitFloat.value, sa.null()) .filter(sa.exists().where( models.TraitFloat.event_id == query.subquery().c.id)), session.query( models.TraitText.event_id, models.TraitText.key, sa.null(), sa.null(), sa.null(), models.TraitText.value) .filter(sa.exists().where( models.TraitText.event_id == query.subquery().c.id))) for id_, key, t_date, t_int, t_float, t_text in ( trait_q.order_by(models.TraitDatetime.key)).all(): if t_int is not None: dtype = api_models.Trait.INT_TYPE val = t_int elif t_float is not None: dtype = api_models.Trait.FLOAT_TYPE val = t_float elif t_date is not None: dtype = api_models.Trait.DATETIME_TYPE val = t_date else: dtype = api_models.Trait.TEXT_TYPE val = t_text try: trait_model = api_models.Trait(key, dtype, val) event_list[id_].append_trait(trait_model) except KeyError: # NOTE(gordc): this is expected as we do not set REPEATABLE # READ (bug 1506717). if query is run while recording new # event data, trait query may return more data than event # query. they can be safely discarded. pass return event_list.values() def get_event_types(self): """Return all event types as an iterable of strings.""" session = self._engine_facade.get_session() with session.begin(): query = (session.query(models.EventType.desc). order_by(models.EventType.desc)) for name in query.all(): # The query returns a tuple with one element. yield name[0] def get_trait_types(self, event_type): """Return a dictionary containing the name and data type of the trait. Only trait types for the provided event_type are returned. :param event_type: the type of the Event """ session = self._engine_facade.get_session() with session.begin(): for trait_model in [models.TraitText, models.TraitInt, models.TraitFloat, models.TraitDatetime]: query = (session.query(trait_model.key) .join(models.Event, models.Event.id == trait_model.event_id) .join(models.EventType, sa.and_(models.EventType.id == models.Event.event_type_id, models.EventType.desc == event_type)) .distinct()) dtype = TRAIT_MODEL_TO_ID.get(trait_model) for row in query.all(): yield {'name': row[0], 'data_type': dtype} def get_traits(self, event_type, trait_type=None): """Return all trait instances associated with an event_type. If trait_type is specified, only return instances of that trait type. 
:param event_type: the type of the Event to filter by :param trait_type: the name of the Trait to filter by """ session = self._engine_facade.get_session() with session.begin(): for trait_model in [models.TraitText, models.TraitInt, models.TraitFloat, models.TraitDatetime]: query = (session.query(trait_model.key, trait_model.value) .join(models.Event, models.Event.id == trait_model.event_id) .join(models.EventType, sa.and_(models.EventType.id == models.Event.event_type_id, models.EventType.desc == event_type)) .order_by(trait_model.key)) if trait_type: query = query.filter(trait_model.key == trait_type) dtype = TRAIT_MODEL_TO_ID.get(trait_model) for k, v in query.all(): yield api_models.Trait(name=k, dtype=dtype, value=v) def clear_expired_data(self, ttl, max_count): """Clear expired data from the backend storage system. Clearing occurs according to the time-to-live. :param ttl: Number of seconds to keep records for. :param max_count: Number of records to delete. """ session = self._engine_facade.get_session() with session.begin(): end = timeutils.utcnow() - datetime.timedelta(seconds=ttl) event_q = (session.query(models.Event.id) .filter(models.Event.generated < end)) # NOTE(e0ne): it's not an optiomal from the performance point of # view but it works with all databases. ids = [i[0] for i in event_q.limit(max_count)] for trait_model in [models.TraitText, models.TraitInt, models.TraitFloat, models.TraitDatetime]: session.query(trait_model).filter( trait_model.event_id.in_(ids) ).delete(synchronize_session="fetch") event_rows = session.query(models.Event).filter( models.Event.id.in_(ids) ).delete(synchronize_session="fetch") # remove EventType and TraitType with no corresponding # matching events (session.query(models.EventType) .filter(~models.EventType.events.any()) .delete(synchronize_session="fetch")) LOG.info("%d events are removed from database", event_rows) return event_rows ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/models.py0000664000175000017500000001042000000000000017331 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Model classes for use in the events storage API. """ from oslo_utils import timeutils from panko.storage import base def serialize_dt(value): """Serializes parameter if it is datetime.""" return value.isoformat() if hasattr(value, 'isoformat') else value class Event(base.Model): """A raw event from the source system. Events have Traits. Metrics will be derived from one or more Events. """ DUPLICATE = 1 UNKNOWN_PROBLEM = 2 INCOMPATIBLE_TRAIT = 3 SUPPORT_DIRS = ('asc', 'desc') SUPPORT_SORT_KEYS = ('message_id', 'generated') DEFAULT_DIR = 'asc' DEFAULT_SORT = [('generated', 'asc'), ('message_id', 'asc')] PRIMARY_KEY = 'message_id' def __init__(self, message_id, event_type, generated, traits, raw): """Create a new event. :param message_id: Unique ID for the message this event stemmed from. 
This is different than the Event ID, which comes from the underlying storage system. :param event_type: The type of the event. :param generated: UTC time for when the event occurred. :param traits: list of Traits on this Event. :param raw: Unindexed raw notification details. """ base.Model.__init__(self, message_id=message_id, event_type=event_type, generated=generated, traits=traits, raw=raw) def append_trait(self, trait_model): self.traits.append(trait_model) def __repr__(self): trait_list = [] if self.traits: trait_list = [str(trait) for trait in self.traits] return ("" % (self.message_id, self.event_type, self.generated, " ".join(trait_list))) def serialize(self): return {'message_id': self.message_id, 'event_type': self.event_type, 'generated': serialize_dt(self.generated), 'traits': [trait.serialize() for trait in self.traits], 'raw': self.raw} class Trait(base.Model): """A Trait is a key/value pair of data on an Event. The value is variant record of basic data types (int, date, float, etc). """ NONE_TYPE = 0 TEXT_TYPE = 1 INT_TYPE = 2 FLOAT_TYPE = 3 DATETIME_TYPE = 4 type_names = { NONE_TYPE: "none", TEXT_TYPE: "string", INT_TYPE: "integer", FLOAT_TYPE: "float", DATETIME_TYPE: "datetime" } def __init__(self, name, dtype, value): if not dtype: dtype = Trait.NONE_TYPE base.Model.__init__(self, name=name, dtype=dtype, value=value) def __repr__(self): return "" % (self.name, self.dtype, self.value) def serialize(self): return self.name, self.dtype, serialize_dt(self.value) def get_type_name(self): return self.get_name_by_type(self.dtype) @classmethod def get_type_by_name(cls, type_name): return getattr(cls, '%s_TYPE' % type_name.upper(), None) @classmethod def get_type_names(cls): return cls.type_names.values() @classmethod def get_name_by_type(cls, type_id): return cls.type_names.get(type_id, "none") @classmethod def convert_value(cls, trait_type, value): if trait_type is cls.INT_TYPE: return int(value) if trait_type is cls.FLOAT_TYPE: return float(value) if trait_type is cls.DATETIME_TYPE: return timeutils.normalize_time(timeutils.parse_isotime(value)) # Cropping the text value to match the TraitText value size if isinstance(value, bytes): return value.decode('utf-8')[:255] return str(value)[:255] ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4741726 panko-10.0.0/panko/storage/mongo/0000775000175000017500000000000000000000000016616 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/mongo/__init__.py0000664000175000017500000000000000000000000020715 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/mongo/utils.py0000664000175000017500000002067500000000000020342 0ustar00zuulzuul00000000000000# # Copyright Ericsson AB 2013. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
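# A minimal, illustrative sketch of the Trait helpers defined above in
# panko/storage/models.py (the trait name and value are made up):
#
#     from panko.storage import models
#
#     dtype = models.Trait.INT_TYPE
#     value = models.Trait.convert_value(dtype, '42')   # -> 42 (int)
#     trait = models.Trait('disk_gb', dtype, value)
#     trait.get_type_name()                             # -> 'integer'
#
# Text values are cropped to 255 characters by convert_value so they fit the
# TraitText column size.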
"""Common functions for MongoDB backend """ import weakref from oslo_log import log from oslo_utils import netutils import pymongo import pymongo.errors import tenacity from panko.i18n import _ ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS = 86 LOG = log.getLogger(__name__) EVENT_TRAIT_TYPES = {'none': 0, 'string': 1, 'integer': 2, 'float': 3, 'datetime': 4} OP_SIGN = {'lt': '$lt', 'le': '$lte', 'ne': '$ne', 'gt': '$gt', 'ge': '$gte'} MINIMUM_COMPATIBLE_MONGODB_VERSION = [2, 4] COMPLETE_AGGREGATE_COMPATIBLE_VERSION = [2, 6] def make_timestamp_range(start, end, start_timestamp_op=None, end_timestamp_op=None): """Create the query document to find timestamps within that range. This is done by given two possible datetimes and their operations. By default, using $gte for the lower bound and $lt for the upper bound. """ ts_range = {} if start: if start_timestamp_op == 'gt': start_timestamp_op = '$gt' else: start_timestamp_op = '$gte' ts_range[start_timestamp_op] = start if end: if end_timestamp_op == 'le': end_timestamp_op = '$lte' else: end_timestamp_op = '$lt' ts_range[end_timestamp_op] = end return ts_range def make_events_query_from_filter(event_filter): """Return start and stop row for filtering and a query. Query is based on the selected parameter. :param event_filter: storage.EventFilter object. """ query = {} q_list = [] ts_range = make_timestamp_range(event_filter.start_timestamp, event_filter.end_timestamp) if ts_range: q_list.append({'timestamp': ts_range}) if event_filter.event_type: q_list.append({'event_type': event_filter.event_type}) if event_filter.message_id: q_list.append({'_id': event_filter.message_id}) if event_filter.traits_filter: for trait_filter in event_filter.traits_filter: op = trait_filter.pop('op', 'eq') dict_query = {} for k, v in trait_filter.items(): if v is not None: # All parameters in EventFilter['traits'] are optional, so # we need to check if they are in the query or no. 
if k == 'key': dict_query.setdefault('trait_name', v) elif k in ['string', 'integer', 'datetime', 'float']: dict_query.setdefault('trait_type', EVENT_TRAIT_TYPES[k]) dict_query.setdefault('trait_value', v if op == 'eq' else {OP_SIGN[op]: v}) dict_query = {'$elemMatch': dict_query} q_list.append({'traits': dict_query}) if event_filter.admin_proj: q_list.append({'$or': [ {'traits': {'$not': {'$elemMatch': {'trait_name': 'project_id'}}}}, {'traits': { '$elemMatch': {'trait_name': 'project_id', 'trait_value': event_filter.admin_proj}}}]}) if q_list: query = {'$and': q_list} return query class ConnectionPool(object): def __init__(self): self._pool = {} def connect(self, url, max_retries, retry_interval): connection_options = pymongo.uri_parser.parse_uri(url) del connection_options['database'] del connection_options['username'] del connection_options['password'] del connection_options['collection'] pool_key = tuple(connection_options) if pool_key in self._pool: client = self._pool.get(pool_key)() if client: return client splitted_url = netutils.urlsplit(url) log_data = {'db': splitted_url.scheme, 'nodelist': connection_options['nodelist']} LOG.info('Connecting to %(db)s on %(nodelist)s' % log_data) try: client = MongoProxy(pymongo.MongoClient(url), max_retries, retry_interval) except pymongo.errors.ConnectionFailure as e: LOG.warning(_('Unable to connect to the database server: ' '%(errmsg)s.') % {'errmsg': e}) raise self._pool[pool_key] = weakref.ref(client) return client def _safe_mongo_call(max_retries, retry_interval): return tenacity.retry( retry=tenacity.retry_if_exception_type( pymongo.errors.AutoReconnect), wait=tenacity.wait_fixed(retry_interval), stop=(tenacity.stop_after_attempt(max_retries) if max_retries >= 0 else tenacity.stop_never) ) MONGO_METHODS = set([typ for typ in dir(pymongo.collection.Collection) if not typ.startswith('_')]) MONGO_METHODS.update(set([typ for typ in dir(pymongo.MongoClient) if not typ.startswith('_')])) MONGO_METHODS.update(set([typ for typ in dir(pymongo) if not typ.startswith('_')])) class MongoProxy(object): def __init__(self, conn, max_retries, retry_interval): self.conn = conn self.max_retries = max_retries self.retry_interval = retry_interval self._recreate_index = _safe_mongo_call( self.max_retries, self.retry_interval)(self._recreate_index) def __getitem__(self, item): """Create and return proxy around the method in the connection. :param item: name of the connection """ return MongoProxy(self.conn[item]) def find(self, *args, **kwargs): # We need this modifying method to return a CursorProxy object so that # we can handle the Cursor next function to catch the AutoReconnect # exception. return CursorProxy(self.conn.find(*args, **kwargs), self.max_retries, self.retry_interval) def create_index(self, keys, name=None, *args, **kwargs): try: self.conn.create_index(keys, name=name, *args, **kwargs) except pymongo.errors.OperationFailure as e: if e.code is ERROR_INDEX_WITH_DIFFERENT_SPEC_ALREADY_EXISTS: LOG.info("Index %s will be recreate." % name) self._recreate_index(keys, name, *args, **kwargs) def _recreate_index(self, keys, name, *args, **kwargs): self.conn.drop_index(name) self.conn.create_index(keys, name=name, *args, **kwargs) def __getattr__(self, item): """Wrap MongoDB connection. If item is the name of an executable method, for example find or insert, wrap this method in the MongoConn. Else wrap getting attribute with MongoProxy. 
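For instance, proxy.some_db.some_collection keeps returning MongoProxy
wrappers (the names are placeholders), while a known method name such as
find_one resolves to the underlying callable wrapped by the retry decorator
built from _safe_mongo_call.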
""" if item in ('name', 'database'): return getattr(self.conn, item) if item in MONGO_METHODS: return _safe_mongo_call( self.max_retries, self.retry_interval )(getattr(self.conn, item)) return MongoProxy(getattr(self.conn, item), self.max_retries, self.retry_interval) def __call__(self, *args, **kwargs): return self.conn(*args, **kwargs) class CursorProxy(pymongo.cursor.Cursor): def __init__(self, cursor, max_retry, retry_interval): self.cursor = cursor self.next = _safe_mongo_call(max_retry, retry_interval)(self._next) def __getitem__(self, item): return self.cursor[item] def _next(self): """Wrap Cursor next method. This method will be executed before each Cursor next method call. """ try: save_cursor = self.cursor.clone() return self.cursor.next() except pymongo.errors.AutoReconnect: self.cursor = save_cursor raise def __getattr__(self, item): return getattr(self.cursor, item) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/pymongo_base.py0000664000175000017500000001370500000000000020541 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common functions for MongoDB backend """ from oslo_log import log import pymongo from panko.storage import base from panko.storage import models from panko.storage.mongo import utils as pymongo_utils from panko import utils LOG = log.getLogger(__name__) COMMON_AVAILABLE_CAPABILITIES = { 'events': {'query': {'simple': True}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } class Connection(base.Connection): """Base event Connection class for MongoDB driver.""" CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES, COMMON_AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = utils.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) def record_events(self, event_models): """Write the events to database. :param event_models: a list of models.Event objects. """ error = None for event_model in event_models: traits = [] if event_model.traits: for trait in event_model.traits: traits.append({'trait_name': trait.name, 'trait_type': trait.dtype, 'trait_value': trait.value}) try: self.db.event.insert_one( {'_id': event_model.message_id, 'event_type': event_model.event_type, 'timestamp': event_model.generated, 'traits': traits, 'raw': event_model.raw}) except pymongo.errors.DuplicateKeyError as ex: LOG.debug("Duplicate event detected, skipping it: %s", ex) except Exception as ex: LOG.exception("Failed to record event: %s", ex) error = ex if error: raise error def get_events(self, event_filter, pagination=None): """Return an iter of models.Event objects. :param event_filter: storage.EventFilter object, consists of filters for events that are stored in database. :param pagination: Pagination parameters. 
""" limit = None if pagination: if pagination.get('sort'): LOG.warning('Driver does not support sort functionality') limit = pagination.get('limit') if limit == 0: return q = pymongo_utils.make_events_query_from_filter(event_filter) if limit is not None: results = self.db.event.find(q, limit=limit) else: results = self.db.event.find(q) for event in results: traits = [] for trait in event['traits']: traits.append(models.Trait(name=trait['trait_name'], dtype=int(trait['trait_type']), value=trait['trait_value'])) yield models.Event(message_id=event['_id'], event_type=event['event_type'], generated=event['timestamp'], traits=traits, raw=event.get('raw')) def get_event_types(self): """Return all event types as an iter of strings.""" return self.db.event.distinct('event_type') def get_trait_types(self, event_type): """Return a dictionary containing the name and data type of the trait. Only trait types for the provided event_type are returned. :param event_type: the type of the Event. """ trait_names = set() events = self.db.event.find({'event_type': event_type}) for event in events: for trait in event['traits']: trait_name = trait['trait_name'] if trait_name not in trait_names: # Here we check that our method return only unique # trait types. Method will return only one trait type. It # is proposed that certain trait name could have only one # trait type. trait_names.add(trait_name) yield {'name': trait_name, 'data_type': trait['trait_type']} def get_traits(self, event_type, trait_name=None): """Return all trait instances associated with an event_type. If trait_type is specified, only return instances of that trait type. :param event_type: the type of the Event to filter by :param trait_name: the name of the Trait to filter by """ if not trait_name: events = self.db.event.find({'event_type': event_type}) else: # We choose events that simultaneously have event_type and certain # trait_name, and retrieve events contains only mentioned traits. 
events = self.db.event.find({'$and': [{'event_type': event_type}, {'traits.trait_name': trait_name}]}, {'traits': {'$elemMatch': {'trait_name': trait_name}} }) for event in events: for trait in event['traits']: yield models.Trait(name=trait['trait_name'], dtype=trait['trait_type'], value=trait['trait_value']) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4741726 panko-10.0.0/panko/storage/sqlalchemy/0000775000175000017500000000000000000000000017641 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/sqlalchemy/__init__.py0000664000175000017500000000000000000000000021740 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4781728 panko-10.0.0/panko/storage/sqlalchemy/alembic/0000775000175000017500000000000000000000000021235 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/sqlalchemy/alembic/README0000664000175000017500000000004600000000000022115 0ustar00zuulzuul00000000000000Generic single-database configuration.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/sqlalchemy/alembic/alembic.ini0000664000175000017500000000321000000000000023326 0ustar00zuulzuul00000000000000# A generic, single database configuration. [alembic] # path to migration scripts script_location = panko.storage.sqlalchemy:alembic # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # timezone to use when rendering the date # within the migration file as well as the filename. # string value is passed to dateutil.tz.gettz() # leave blank for localtime # timezone = # max length of characters to apply to the # "slug" field #truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false # set to 'true' to allow .pyc and .pyo files without # a source .py file to be detected as revisions in the # versions/ directory # sourceless = false # version location specification; this defaults # to alembic/versions. 
When using multiple version # directories, initial revisions must be specified with --version-path # version_locations = %(here)s/bar %(here)s/bat alembic/versions # the output encoding used when revision files # are written from script.py.mako # output_encoding = utf-8 sqlalchemy.url = driver://user:pass@localhost/dbname # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/sqlalchemy/alembic/env.py0000664000175000017500000000515000000000000022400 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from alembic import config as alembic_config from alembic import context from sqlalchemy import engine_from_config, pool from logging.config import fileConfig # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = alembic_config.Config(os.path.join(os.path.dirname(__file__), 'alembic.ini')) # Interpret the config file for Python logging. # This line sets up loggers basically. fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel # target_metadata = mymodel.Base.metadata target_metadata = None # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ url = config.get_main_option("sqlalchemy.url") context.configure( url=url, target_metadata=target_metadata, literal_binds=True) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. 
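The engine configuration is read from the [alembic] section of the bundled
alembic.ini above (the sqlalchemy.url option), via engine_from_config with
the 'sqlalchemy.' prefix.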
""" connectable = engine_from_config( config.get_section(config.config_ini_section), prefix='sqlalchemy.', poolclass=pool.NullPool) with connectable.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata ) with context.begin_transaction(): context.run_migrations() if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/sqlalchemy/alembic/script.py.mako0000664000175000017500000000075600000000000024051 0ustar00zuulzuul00000000000000"""${message} Revision ID: ${up_revision} Revises: ${down_revision | comma,n} Create Date: ${create_date} """ from alembic import op import sqlalchemy as sa ${imports if imports else ""} # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} branch_labels = ${repr(branch_labels)} depends_on = ${repr(depends_on)} def upgrade(): ${upgrades if upgrades else "pass"} def downgrade(): ${downgrades if downgrades else "pass"} ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4781728 panko-10.0.0/panko/storage/sqlalchemy/alembic/versions/0000775000175000017500000000000000000000000023105 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/sqlalchemy/alembic/versions/c3955547bff2_support_big_integer_traits.py0000664000175000017500000000162300000000000032751 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """support big integer traits Revision ID: c3955547bff2 Revises: Create Date: 2017-07-18 22:03:44.996571 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'c3955547bff2' down_revision = None branch_labels = None depends_on = None def upgrade(): op.alter_column('trait_int', "value", type_=sa.BigInteger) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/storage/sqlalchemy/models.py0000664000175000017500000001274400000000000021506 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SQLAlchemy models for Panko data. 
""" from oslo_serialization import jsonutils import sqlalchemy from sqlalchemy import Column, Integer, String, ForeignKey, Index from sqlalchemy import BigInteger, Float, DateTime from sqlalchemy.dialects.mysql import DECIMAL from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import deferred from sqlalchemy.orm import relationship from sqlalchemy.types import TypeDecorator from panko import utils class JSONEncodedDict(TypeDecorator): """Represents an immutable structure as a json-encoded string.""" impl = sqlalchemy.Text @staticmethod def process_bind_param(value, dialect): if value is not None: value = jsonutils.dumps(value) return value @staticmethod def process_result_value(value, dialect): if value is not None: value = jsonutils.loads(value) return value class PreciseTimestamp(TypeDecorator): """Represents a timestamp precise to the microsecond.""" impl = DateTime def load_dialect_impl(self, dialect): if dialect.name == 'mysql': return dialect.type_descriptor(DECIMAL(precision=20, scale=6, asdecimal=True)) return self.impl @staticmethod def process_bind_param(value, dialect): if value is None: return value elif dialect.name == 'mysql': return utils.dt_to_decimal(value) return value @staticmethod def process_result_value(value, dialect): if value is None: return value elif dialect.name == 'mysql': return utils.decimal_to_dt(value) return value class PankoBase(object): """Base class for Panko Models.""" __table_args__ = {'mysql_charset': "utf8", 'mysql_engine': "InnoDB"} __table_initialized__ = False def __setitem__(self, key, value): setattr(self, key, value) def __getitem__(self, key): return getattr(self, key) def update(self, values): """Make the model object behave like a dict.""" for k, v in values.items(): setattr(self, k, v) Base = declarative_base(cls=PankoBase) class EventType(Base): """Types of event records.""" __tablename__ = 'event_type' id = Column(Integer, primary_key=True) desc = Column(String(255), unique=True) def __init__(self, event_type): self.desc = event_type def __repr__(self): return "" % self.desc class Event(Base): __tablename__ = 'event' __table_args__ = ( Index('ix_event_message_id', 'message_id'), Index('ix_event_type_id', 'event_type_id'), Index('ix_event_generated', 'generated') ) id = Column(Integer, primary_key=True) message_id = Column(String(50), unique=True) generated = Column(PreciseTimestamp()) raw = deferred(Column(JSONEncodedDict())) event_type_id = Column(Integer, ForeignKey('event_type.id')) event_type = relationship("EventType", backref='events') def __init__(self, message_id, event_type, generated, raw): self.message_id = message_id self.event_type = event_type self.generated = generated self.raw = raw def __repr__(self): return "" % (self.id, self.message_id, self.event_type, self.generated) class TraitText(Base): """Event text traits.""" __tablename__ = 'trait_text' __table_args__ = ( Index('ix_trait_text_event_id_key', 'event_id', 'key'), ) event_id = Column(Integer, ForeignKey('event.id'), primary_key=True) key = Column(String(255), primary_key=True) value = Column(String(255)) class TraitInt(Base): """Event integer traits.""" __tablename__ = 'trait_int' __table_args__ = ( Index('ix_trait_int_event_id_key', 'event_id', 'key'), ) event_id = Column(Integer, ForeignKey('event.id'), primary_key=True) key = Column(String(255), primary_key=True) value = Column(BigInteger) class TraitFloat(Base): """Event float traits.""" __tablename__ = 'trait_float' __table_args__ = ( Index('ix_trait_float_event_id_key', 'event_id', 
'key'), ) event_id = Column(Integer, ForeignKey('event.id'), primary_key=True) key = Column(String(255), primary_key=True) value = Column(Float(53)) class TraitDatetime(Base): """Event datetime traits.""" __tablename__ = 'trait_datetime' __table_args__ = ( Index('ix_trait_datetime_event_id_key', 'event_id', 'key'), ) event_id = Column(Integer, ForeignKey('event.id'), primary_key=True) key = Column(String(255), primary_key=True) value = Column(PreciseTimestamp()) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4781728 panko-10.0.0/panko/tests/0000775000175000017500000000000000000000000015175 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/__init__.py0000664000175000017500000000000000000000000017274 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/base.py0000664000175000017500000000550300000000000016464 0ustar00zuulzuul00000000000000# Copyright 2012 New Dream Network (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test base classes. """ import functools import os.path from oslo_utils import timeutils from oslotest import base from testtools import testcase import webtest import panko class BaseTestCase(base.BaseTestCase): def assertTimestampEqual(self, first, second, msg=None): """Checks that two timestamps are equals. This relies on assertAlmostEqual to avoid rounding problem, and only checks up the first microsecond values. 
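For example, two timestamps that differ by only a couple of microseconds
still compare as equal, while a difference of a full second fails the
assertion.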
""" return self.assertAlmostEqual( timeutils.delta_seconds(first, second), 0.0, places=5) def assertIsEmpty(self, obj): try: if len(obj) != 0: self.fail("%s is not empty" % type(obj)) except (TypeError, AttributeError): self.fail("%s doesn't have length" % type(obj)) def assertIsNotEmpty(self, obj): try: if len(obj) == 0: self.fail("%s is empty" % type(obj)) except (TypeError, AttributeError): self.fail("%s doesn't have length" % type(obj)) @staticmethod def path_get(project_file=None): root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', ) ) if project_file: return os.path.join(root, project_file) else: return root def _skip_decorator(func): @functools.wraps(func) def skip_if_not_implemented(*args, **kwargs): try: return func(*args, **kwargs) except panko.NotImplementedError as e: raise testcase.TestSkipped(str(e)) except webtest.app.AppError as e: if 'not implemented' in str(e): raise testcase.TestSkipped(str(e)) raise return skip_if_not_implemented class SkipNotImplementedMeta(type): def __new__(cls, name, bases, local): for attr in local: value = local[attr] if callable(value) and ( attr.startswith('test_') or attr == 'setUp'): local[attr] = _skip_decorator(value) return type.__new__(cls, name, bases, local) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/db.py0000664000175000017500000001645100000000000016143 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Base classes for API tests.""" import os from unittest import mock from urllib import parse as urlparse import warnings import fixtures from oslo_utils import uuidutils import sqlalchemy from testtools import testcase from panko import service from panko import storage from panko.tests import base as test_base try: from panko.tests import mocks except ImportError: mocks = None # happybase module is not Python 3 compatible yet class MongoDbManager(fixtures.Fixture): def __init__(self, url, conf): self._url = url self.conf = conf def setUp(self): super(MongoDbManager, self).setUp() with warnings.catch_warnings(): warnings.filterwarnings( action='ignore', message='.*you must provide a username and password.*') try: self.connection = storage.get_connection(self.url, self.conf) except storage.StorageBadVersion as e: raise testcase.TestSkipped(str(e)) @property def url(self): return '%(url)s_%(db)s' % { 'url': self._url, 'db': uuidutils.generate_uuid(dashed=False) } class SQLManager(fixtures.Fixture): def __init__(self, url, conf): db_name = 'panko_%s' % uuidutils.generate_uuid(dashed=False) engine = sqlalchemy.create_engine(url) conn = engine.connect() self._create_database(conn, db_name) conn.close() engine.dispose() parsed = list(urlparse.urlparse(url)) parsed[2] = '/' + db_name self.url = urlparse.urlunparse(parsed) self.conf = conf def setUp(self): super(SQLManager, self).setUp() self.connection = storage.get_connection(self.url, self.conf) class PgSQLManager(SQLManager): @staticmethod def _create_database(conn, db_name): conn.connection.set_isolation_level(0) conn.execute('CREATE DATABASE %s WITH TEMPLATE template0;' % db_name) conn.connection.set_isolation_level(1) class MySQLManager(SQLManager): @staticmethod def _create_database(conn, db_name): conn.execute('CREATE DATABASE %s;' % db_name) class ElasticSearchManager(fixtures.Fixture): def __init__(self, url, conf): self.url = url self.conf = conf def setUp(self): super(ElasticSearchManager, self).setUp() self.connection = storage.get_connection( self.url, self.conf) # prefix each test with unique index name inx_uuid = uuidutils.generate_uuid(dashed=False) self.connection.index_name = 'events_%s' % inx_uuid # force index on write so data is queryable right away self.connection._refresh_on_write = True class HBaseManager(fixtures.Fixture): def __init__(self, url, conf): self._url = url self.conf = conf def setUp(self): super(HBaseManager, self).setUp() self.connection = storage.get_connection( self.url, self.conf) # Unique prefix for each test to keep data is distinguished because # all test data is stored in one table data_prefix = uuidutils.generate_uuid(dashed=False) def table(conn, name): return mocks.MockHBaseTable(name, conn, data_prefix) # Mock only real HBase connection, MConnection "table" method # stays origin. mock.patch('happybase.Connection.table', new=table).start() # We shouldn't delete data and tables after each test, # because it last for too long. 
# All tests tables will be deleted in setup-test-env.sh mock.patch("happybase.Connection.disable_table", new=mock.MagicMock()).start() mock.patch("happybase.Connection.delete_table", new=mock.MagicMock()).start() mock.patch("happybase.Connection.create_table", new=mock.MagicMock()).start() @property def url(self): return '%s?table_prefix=%s&table_prefix_separator=%s' % ( self._url, os.getenv("PANKO_TEST_HBASE_TABLE_PREFIX", "test"), os.getenv("PANKO_TEST_HBASE_TABLE_PREFIX_SEPARATOR", "_") ) class SQLiteManager(fixtures.Fixture): def __init__(self, url, conf): self.url = url self.conf = conf def setUp(self): super(SQLiteManager, self).setUp() self.connection = storage.get_connection( self.url, self.conf) class TestBase(test_base.BaseTestCase, metaclass=test_base.SkipNotImplementedMeta): DRIVER_MANAGERS = { 'mongodb': MongoDbManager, 'mysql': MySQLManager, 'postgresql': PgSQLManager, 'sqlite': SQLiteManager, 'es': ElasticSearchManager, } if mocks is not None: DRIVER_MANAGERS['hbase'] = HBaseManager def setUp(self): super(TestBase, self).setUp() db_url = os.environ.get('PIFPAF_URL', "sqlite://").replace( "mysql://", "mysql+pymysql://") engine = urlparse.urlparse(db_url).scheme # in case some drivers have additional specification, for example: # PyMySQL will have scheme mysql+pymysql engine = engine.split('+')[0] # NOTE(Alexei_987) Shortcut to skip expensive db setUp test_method = self._get_test_method() if (hasattr(test_method, '_run_with') and engine not in test_method._run_with): raise testcase.TestSkipped( 'Test is not applicable for %s' % engine) self.CONF = service.prepare_service([], []) manager = self.DRIVER_MANAGERS.get(engine) if not manager: self.skipTest("missing driver manager: %s" % engine) self.db_manager = manager(db_url, self.CONF) self.useFixture(self.db_manager) self.conn = self.db_manager.connection self.conn.upgrade() self.useFixture(fixtures.MockPatch('panko.storage.get_connection', side_effect=self._get_connection)) def tearDown(self): self.conn.clear() self.conn = None super(TestBase, self).tearDown() def _get_connection(self, url, conf): return self.conn def run_with(*drivers): """Used to mark tests that are only applicable for certain db driver. Skips test if driver is not available. 
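For example (illustrative class and method names):

    @run_with('mysql', 'postgresql')
    class EventTestPgAndMySQL(TestBase):
        ...

    @run_with('sqlite')
    def test_sqlite_only_behaviour(self):
        ...

The driver names are matched against the scheme of the PIFPAF_URL database
URL in TestBase.setUp.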
""" def decorator(test): if isinstance(test, type) and issubclass(test, TestBase): # Decorate all test methods for attr in dir(test): value = getattr(test, attr) if callable(value) and attr.startswith('test_'): value._run_with = drivers else: test._run_with = drivers return test return decorator ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4781728 panko-10.0.0/panko/tests/functional/0000775000175000017500000000000000000000000017337 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/__init__.py0000664000175000017500000000000000000000000021436 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4781728 panko-10.0.0/panko/tests/functional/api/0000775000175000017500000000000000000000000020110 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/api/__init__.py0000664000175000017500000001520600000000000022225 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base classes for API tests. """ from oslo_policy import opts import webtest from panko.api import app from panko.api import rbac from panko import service from panko.tests import db as db_test_base class FunctionalTest(db_test_base.TestBase): """Used for functional tests of Pecan controllers. Used in case when you need to test your literal application and its integration with the framework. """ PATH_PREFIX = '' def setUp(self): super(FunctionalTest, self).setUp() self.CONF = service.prepare_service([], []) opts.set_defaults(self.CONF) self.CONF.set_override('api_paste_config', self.path_get('etc/panko/api_paste.ini')) self.app = self._make_app(self.CONF) @staticmethod def _make_app(conf): return webtest.TestApp(app.load_app(conf, appname='panko+noauth')) def tearDown(self): super(FunctionalTest, self).tearDown() rbac.reset() def put_json(self, path, params, expect_errors=False, headers=None, extra_environ=None, status=None): """Sends simulated HTTP PUT request to Pecan test app. :param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: boolean value whether an error is expected based on request :param headers: A dictionary of headers to send along with the request :param extra_environ: A dictionary of environ variables to send along with the request :param status: Expected status code of response """ return self.post_json(path=path, params=params, expect_errors=expect_errors, headers=headers, extra_environ=extra_environ, status=status, method="put") def post_json(self, path, params, expect_errors=False, headers=None, method="post", extra_environ=None, status=None): """Sends simulated HTTP POST request to Pecan test app. 
:param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: boolean value whether an error is expected based on request :param headers: A dictionary of headers to send along with the request :param method: Request method type. Appropriate method function call should be used rather than passing attribute in. :param extra_environ: A dictionary of environ variables to send along with the request :param status: Expected status code of response """ full_path = self.PATH_PREFIX + path response = getattr(self.app, "%s_json" % method)( str(full_path), params=params, headers=headers, status=status, extra_environ=extra_environ, expect_errors=expect_errors ) return response def delete(self, path, expect_errors=False, headers=None, extra_environ=None, status=None): """Sends simulated HTTP DELETE request to Pecan test app. :param path: url path of target service :param expect_errors: boolean value whether an error is expected based on request :param headers: A dictionary of headers to send along with the request :param extra_environ: A dictionary of environ variables to send along with the request :param status: Expected status code of response """ full_path = self.PATH_PREFIX + path response = self.app.delete(str(full_path), headers=headers, status=status, extra_environ=extra_environ, expect_errors=expect_errors) return response def get_json(self, path, expect_errors=False, headers=None, extra_environ=None, q=None, groupby=None, status=None, override_params=None, **params): """Sends simulated HTTP GET request to Pecan test app. :param path: url path of target service :param expect_errors: boolean value whether an error is expected based on request :param headers: A dictionary of headers to send along with the request :param extra_environ: A dictionary of environ variables to send along with the request :param q: list of queries consisting of: field, value, op, and type keys :param groupby: list of fields to group by :param status: Expected status code of response :param override_params: literally encoded query param string :param params: content for wsgi.input of request """ q = q or [] groupby = groupby or [] full_path = self.PATH_PREFIX + path if override_params: all_params = override_params else: query_params = {'q.field': [], 'q.value': [], 'q.op': [], 'q.type': [], } for query in q: for name in ['field', 'op', 'value', 'type']: query_params['q.%s' % name].append(query.get(name, '')) all_params = {} all_params.update(params) if q: all_params.update(query_params) if groupby: all_params.update({'groupby': groupby}) response = self.app.get(full_path, params=all_params, headers=headers, extra_environ=extra_environ, expect_errors=expect_errors, status=status) if not expect_errors: response = response.json return response ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4781728 panko-10.0.0/panko/tests/functional/api/v2/0000775000175000017500000000000000000000000020437 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/api/v2/__init__.py0000664000175000017500000000130500000000000022547 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from panko.tests.functional import api class FunctionalTest(api.FunctionalTest): PATH_PREFIX = '/v2' ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/api/v2/test_acl_scenarios.py0000664000175000017500000001442300000000000024661 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test ACL.""" import datetime import os from keystonemiddleware import fixture as ksm_fixture from oslo_utils import fileutils from oslo_utils import uuidutils import webtest from panko.api import app from panko.storage import models from panko.tests.functional.api import v2 VALID_TOKEN = uuidutils.generate_uuid(dashed=False) VALID_TOKEN2 = uuidutils.generate_uuid(dashed=False) class TestAPIACL(v2.FunctionalTest): def setUp(self): super(TestAPIACL, self).setUp() self.auth_token_fixture = self.useFixture( ksm_fixture.AuthTokenFixture()) self.auth_token_fixture.add_token_data( token_id=VALID_TOKEN, # FIXME(morganfainberg): The project-id should be a proper uuid project_id='123i2910', role_list=['admin'], user_name='user_id2', user_id='user_id2', is_v2=True ) self.auth_token_fixture.add_token_data( token_id=VALID_TOKEN2, # FIXME(morganfainberg): The project-id should be a proper uuid project_id='project-good', role_list=['Member'], user_name='user_id1', user_id='user_id1', is_v2=True) def get_json(self, path, expect_errors=False, headers=None, q=None, **params): return super(TestAPIACL, self).get_json(path, expect_errors=expect_errors, headers=headers, q=q or [], **params) @staticmethod def _make_app(conf): return webtest.TestApp(app.load_app(conf, appname='panko+keystone')) class TestAPIEventACL(TestAPIACL): PATH = '/events' def test_non_admin_get_event_types(self): data = self.get_json('/event_types', expect_errors=True, headers={"X-Roles": "Member", "X-Auth-Token": VALID_TOKEN2, "X-Project-Id": "project-good"}) self.assertEqual(401, data.status_int) class TestBaseApiEventRBAC(v2.FunctionalTest): PATH = '/events' def setUp(self): super(TestBaseApiEventRBAC, self).setUp() traits = [models.Trait('project_id', 1, 'project-good'), models.Trait('user_id', 1, 'user-good')] self.message_id = uuidutils.generate_uuid() ev = models.Event(self.message_id, 'event_type', datetime.datetime.now(), traits, {}) self.conn.record_events([ev]) def test_get_events_without_project(self): headers_no_proj = {"X-Roles": "admin", "X-User-Id": "user-good"} resp = self.get_json(self.PATH, expect_errors=True, headers=headers_no_proj, status=403) self.assertEqual(403, resp.status_int) def 
test_get_events_without_user(self): headers_no_user = {"X-Roles": "admin", "X-Project-Id": "project-good"} resp = self.get_json(self.PATH, expect_errors=True, headers=headers_no_user, status=403) self.assertEqual(403, resp.status_int) def test_get_events_without_scope(self): headers_no_user_proj = {"X-Roles": "admin"} resp = self.get_json(self.PATH, expect_errors=True, headers=headers_no_user_proj, status=403) self.assertEqual(403, resp.status_int) def test_get_events(self): headers = {"X-Roles": "Member", "X-User-Id": "user-good", "X-Project-Id": "project-good"} self.get_json(self.PATH, headers=headers, status=200) def test_get_event(self): headers = {"X-Roles": "Member", "X-User-Id": "user-good", "X-Project-Id": "project-good"} self.get_json(self.PATH + "/" + self.message_id, headers=headers, status=200) class TestApiEventAdminRBAC(TestBaseApiEventRBAC): def _make_app(self, conf): content = ('{"context_is_admin": "role:admin",' '"telemetry:events:index": "rule:context_is_admin",' '"telemetry:events:show": "rule:context_is_admin"}') content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='policy', suffix='.json') conf.set_override("policy_file", self.tempfile, group='oslo_policy') return webtest.TestApp(app.load_app(conf, appname='panko+noauth')) def tearDown(self): os.remove(self.tempfile) super(TestApiEventAdminRBAC, self).tearDown() def test_get_events(self): headers_rbac = {"X-Roles": "admin", "X-User-Id": "user-good", "X-Project-Id": "project-good"} self.get_json(self.PATH, headers=headers_rbac, status=200) def test_get_events_bad(self): headers_rbac = {"X-Roles": "Member", "X-User-Id": "user-good", "X-Project-Id": "project-good"} self.get_json(self.PATH, headers=headers_rbac, status=403) def test_get_event(self): headers = {"X-Roles": "admin", "X-User-Id": "user-good", "X-Project-Id": "project-good"} self.get_json(self.PATH + "/" + self.message_id, headers=headers, status=200) def test_get_event_bad(self): headers = {"X-Roles": "Member", "X-User-Id": "user-good", "X-Project-Id": "project-good"} self.get_json(self.PATH + "/" + self.message_id, headers=headers, status=403) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/api/v2/test_app.py0000664000175000017500000001012100000000000022623 0ustar00zuulzuul00000000000000# # Copyright 2013 IBM Corp. # Copyright 2013 Julien Danjou # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
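# The RBAC scenarios above (panko/tests/functional/api/v2/test_acl_scenarios.py)
# drive authorization purely through request headers handled by the
# panko+noauth and panko+keystone pipelines, e.g.:
#
#     headers = {"X-Roles": "admin",
#                "X-User-Id": "user-good",
#                "X-Project-Id": "project-good"}
#     self.get_json('/events', headers=headers, status=200)
#
# The admin-only variant additionally swaps in a policy file restricting
# telemetry:events:index and telemetry:events:show to the admin role.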
"""Test basic panko-api app """ from panko.tests.functional.api import v2 class TestApiMiddleware(v2.FunctionalTest): no_lang_translated_error = 'No lang translated error' en_US_translated_error = 'en-US translated error' def _fake_translate(self, message, user_locale): if user_locale is None: return self.no_lang_translated_error else: return self.en_US_translated_error def test_json_parsable_error_middleware_404(self): response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "application/json"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "application/json,application/xml"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "application/xml;q=0.8, \ application/json"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) response = self.get_json('/invalid_path', expect_errors=True ) self.assertEqual(404, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "text/html,*/*"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) def test_xml_parsable_error_middleware_404(self): response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "application/xml,*/*"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/xml", response.content_type) self.assertEqual('error_message', response.xml.tag) response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "application/json;q=0.8 \ ,application/xml"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/xml", response.content_type) self.assertEqual('error_message', response.xml.tag) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/api/v2/test_capabilities.py0000664000175000017500000000215700000000000024506 0ustar00zuulzuul00000000000000# # Copyright Ericsson AB 2014. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from panko.tests.functional.api import v2 as tests_api class TestCapabilitiesController(tests_api.FunctionalTest): def setUp(self): super(TestCapabilitiesController, self).setUp() self.url = '/capabilities' def test_capabilities(self): data = self.get_json(self.url) # check that capabilities data contains both 'api' and 'storage' fields self.assertIsNotNone(data) self.assertNotEqual({}, data) self.assertIn('api', data) self.assertIn('event_storage', data) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/api/v2/test_event_scenarios.py0000664000175000017500000010700700000000000025244 0ustar00zuulzuul00000000000000# # Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test event, event_type and trait retrieval.""" import datetime from oslo_utils import uuidutils import webtest.app from panko.storage import models from panko.tests import db as tests_db from panko.tests.functional.api import v2 USER_ID = uuidutils.generate_uuid(dashed=False) PROJ_ID = uuidutils.generate_uuid(dashed=False) HEADERS = {"X-Roles": "admin", "X-User-Id": USER_ID, "X-Project-Id": PROJ_ID} class EventTestBase(v2.FunctionalTest): def setUp(self): super(EventTestBase, self).setUp() self._generate_models() def _generate_models(self): event_models = [] base = 0 self.s_time = datetime.datetime(2013, 12, 31, 5, 0) self.trait_time = datetime.datetime(2013, 12, 31, 5, 0) for event_type in ['Foo', 'Bar', 'Zoo']: trait_models = [models.Trait(name, type, value) for name, type, value in [ ('trait_A', models.Trait.TEXT_TYPE, "my_%s_text" % event_type), ('trait_B', models.Trait.INT_TYPE, base + 1), ('trait_C', models.Trait.FLOAT_TYPE, float(base) + 0.123456), ('trait_D', models.Trait.DATETIME_TYPE, self.trait_time)]] # Message ID for test will be 'base'. So, message ID for the first # event will be '0', the second '100', and so on. # trait_time in first event will be equal to self.trait_time # (datetime.datetime(2013, 12, 31, 5, 0)), next will add 1 day, so # second will be (datetime.datetime(2014, 01, 01, 5, 0)) and so on. 
event_models.append( models.Event(message_id=str(base), event_type=event_type, generated=self.trait_time, traits=trait_models, raw={'status': {'nested': 'started'}})) base += 100 self.trait_time += datetime.timedelta(days=1) self.conn.record_events(event_models) class TestEventTypeAPI(EventTestBase): PATH = '/event_types' def test_event_types(self): data = self.get_json(self.PATH, headers=HEADERS) for event_type in ['Foo', 'Bar', 'Zoo']: self.assertIn(event_type, data) class TestTraitAPI(EventTestBase): PATH = '/event_types/%s/traits' def test_get_traits_for_event(self): path = self.PATH % "Foo" data = self.get_json(path, headers=HEADERS) self.assertEqual(4, len(data)) def test_get_event_invalid_path(self): data = self.get_json('/event_types/trait_A/', headers=HEADERS, expect_errors=True) self.assertEqual(404, data.status_int) def test_get_traits_for_non_existent_event(self): path = self.PATH % "NO_SUCH_EVENT_TYPE" data = self.get_json(path, headers=HEADERS) self.assertEqual([], data) def test_get_trait_data_for_event(self): path = (self.PATH % "Foo") + "/trait_A" data = self.get_json(path, headers=HEADERS) self.assertEqual(1, len(data)) self.assertEqual("trait_A", data[0]['name']) path = (self.PATH % "Foo") + "/trait_B" data = self.get_json(path, headers=HEADERS) self.assertEqual(1, len(data)) self.assertEqual("trait_B", data[0]['name']) self.assertEqual("1", data[0]['value']) path = (self.PATH % "Foo") + "/trait_D" data = self.get_json(path, headers=HEADERS) self.assertEqual(1, len(data)) self.assertEqual("trait_D", data[0]['name']) self.assertEqual((self.trait_time - datetime.timedelta(days=3)). isoformat(), data[0]['value']) def test_get_trait_data_for_non_existent_event(self): path = (self.PATH % "NO_SUCH_EVENT") + "/trait_A" data = self.get_json(path, headers=HEADERS) self.assertEqual([], data) def test_get_trait_data_for_non_existent_trait(self): path = (self.PATH % "Foo") + "/no_such_trait" data = self.get_json(path, headers=HEADERS) self.assertEqual([], data) class TestEventAPI(EventTestBase): PATH = '/events' def test_get_events(self): data = self.get_json(self.PATH, headers=HEADERS) self.assertEqual(3, len(data)) # We expect to get native UTC generated time back trait_time = self.s_time for event in data: expected_generated = trait_time.isoformat() self.assertIn(event['event_type'], ['Foo', 'Bar', 'Zoo']) self.assertEqual(4, len(event['traits'])) self.assertEqual({'status': {'nested': 'started'}}, event['raw']), self.assertEqual(expected_generated, event['generated']) for trait_name in ['trait_A', 'trait_B', 'trait_C', 'trait_D']: self.assertIn(trait_name, map(lambda x: x['name'], event['traits'])) trait_time += datetime.timedelta(days=1) def test_get_event_by_message_id(self): event = self.get_json(self.PATH + "/100", headers=HEADERS) expected_traits = [{'name': 'trait_A', 'type': 'string', 'value': 'my_Bar_text'}, {'name': 'trait_B', 'type': 'integer', 'value': '101'}, {'name': 'trait_C', 'type': 'float', 'value': '100.123456'}, {'name': 'trait_D', 'type': 'datetime', 'value': '2014-01-01T05:00:00'}] self.assertEqual('100', event['message_id']) self.assertEqual('Bar', event['event_type']) self.assertEqual('2014-01-01T05:00:00', event['generated']) self.assertEqual(expected_traits, event['traits']) def test_get_event_by_message_id_no_such_id(self): data = self.get_json(self.PATH + "/DNE", headers=HEADERS, expect_errors=True) self.assertEqual(404, data.status_int) def test_get_events_filter_event_type(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 
'event_type', 'value': 'Foo'}]) self.assertEqual(1, len(data)) def test_get_events_filter_trait_no_type(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text'}]) self.assertEqual(1, len(data)) self.assertEqual('Foo', data[0]['event_type']) def test_get_events_filter_trait_empty_type(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'type': ''}]) self.assertEqual(1, len(data)) self.assertEqual('Foo', data[0]['event_type']) def test_get_events_filter_trait_invalid_type(self): resp = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'whats-up'}], expect_errors=True) self.assertEqual(400, resp.status_code) self.assertEqual("The data type whats-up is not supported. The " "supported data type list is: [\'integer\', " "\'float\', \'string\', \'datetime\']", resp.json['error_message']['faultstring']) def test_get_events_filter_operator_invalid_type(self): resp = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'op': 'whats-up'}], expect_errors=True) self.assertEqual(400, resp.status_code) self.assertEqual("Operator whats-up is not supported. The " "supported operators are: (\'lt\', \'le\', " "\'eq\', \'ne\', \'ge\', \'gt\')", resp.json['error_message']['faultstring']) def test_get_events_filter_start_timestamp(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'start_timestamp', 'op': 'ge', 'value': '2014-01-01T00:00:00'}]) self.assertEqual(2, len(data)) sorted_types = sorted([d['event_type'] for d in data]) event_types = ['Foo', 'Bar', 'Zoo'] self.assertEqual(sorted_types, sorted(event_types[1:])) def test_get_events_filter_start_timestamp_invalid_op(self): resp = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'start_timestamp', 'op': 'gt', 'value': '2014-01-01T00:00:00'}], expect_errors=True) self.assertEqual(400, resp.status_code) self.assertEqual(u'Operator gt is not supported. Only' ' `ge\' operator is available for field' ' start_timestamp', resp.json['error_message']['faultstring']) def test_get_events_filter_end_timestamp(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'end_timestamp', 'op': 'le', 'value': '2014-01-03T00:00:00'}]) self.assertEqual(3, len(data)) event_types = ['Foo', 'Bar', 'Zoo'] sorted_types = sorted([d['event_type'] for d in data]) self.assertEqual(sorted_types, sorted(event_types[:3])) def test_get_events_filter_end_timestamp_invalid_op(self): resp = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'end_timestamp', 'op': 'gt', 'value': '2014-01-03T00:00:00'}], expect_errors=True) self.assertEqual(400, resp.status_code) self.assertEqual(u'Operator gt is not supported. 
Only' ' `le\' operator is available for field' ' end_timestamp', resp.json['error_message']['faultstring']) def test_get_events_filter_start_end_timestamp(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'start_timestamp', 'op': 'ge', 'value': '2014-01-02T00:00:00'}, {'field': 'end_timestamp', 'op': 'le', 'value': '2014-01-03T10:00:00'}]) self.assertEqual(1, len(data)) sorted_types = sorted([d['event_type'] for d in data]) event_types = ['Foo', 'Bar', 'Zoo'] self.assertEqual(sorted_types, sorted(event_types[2:3])) def test_get_events_filter_text_trait(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'string'}]) self.assertEqual(1, len(data)) self.assertEqual('Foo', data[0]['event_type']) def test_get_events_filter_int_trait(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '101', 'type': 'integer'}]) self.assertEqual(1, len(data)) self.assertEqual('Bar', data[0]['event_type']) traits = [x for x in data[0]['traits'] if x['name'] == 'trait_B'] self.assertEqual(1, len(traits)) self.assertEqual('integer', traits[0]['type']) self.assertEqual('101', traits[0]['value']) def test_get_events_filter_float_trait(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '200.123456', 'type': 'float'}]) self.assertEqual(1, len(data)) self.assertEqual('Zoo', data[0]['event_type']) traits = [x for x in data[0]['traits'] if x['name'] == 'trait_C'] self.assertEqual(1, len(traits)) self.assertEqual('float', traits[0]['type']) self.assertEqual('200.123456', traits[0]['value']) def test_get_events_filter_datetime_trait(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value': '2014-01-01T05:00:00', 'type': 'datetime'}]) self.assertEqual(1, len(data)) traits = [x for x in data[0]['traits'] if x['name'] == 'trait_D'] self.assertEqual(1, len(traits)) self.assertEqual('datetime', traits[0]['type']) self.assertEqual('2014-01-01T05:00:00', traits[0]['value']) def test_get_events_multiple_filters(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '1', 'type': 'integer'}, {'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'string'}]) self.assertEqual(1, len(data)) self.assertEqual('Foo', data[0]['event_type']) def test_get_events_multiple_filters_no_matches(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '101', 'type': 'integer'}, {'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'string'}]) self.assertEqual(0, len(data)) def test_get_events_multiple_filters_same_field_different_values(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'string'}, {'field': 'trait_A', 'value': 'my_Bar_text', 'type': 'string'}]) self.assertEqual(0, len(data)) def test_get_events_not_filters(self): data = self.get_json(self.PATH, headers=HEADERS, q=[]) self.assertEqual(3, len(data)) def test_get_events_filter_op_string(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'string', 'op': 'eq'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Bar_text', 'type': 'string', 'op': 'lt'}]) self.assertEqual(0, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Zoo_text', 'type': 'string', 'op': 'le'}]) self.assertEqual(3, len(data)) data = 
self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Foo_text', 'type': 'string', 'op': 'ne'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Bar_text', 'type': 'string', 'op': 'gt'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_A', 'value': 'my_Zoo_text', 'type': 'string', 'op': 'ge'}]) self.assertEqual(1, len(data)) def test_get_events_filter_op_integer(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '101', 'type': 'integer', 'op': 'eq'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '201', 'type': 'integer', 'op': 'lt'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '1', 'type': 'integer', 'op': 'le'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '101', 'type': 'integer', 'op': 'ne'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '201', 'type': 'integer', 'op': 'gt'}]) self.assertEqual(0, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '1', 'type': 'integer', 'op': 'ge'}]) self.assertEqual(3, len(data)) def test_get_events_filter_op_float(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '100.123456', 'type': 'float', 'op': 'eq'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '200.123456', 'type': 'float', 'op': 'lt'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '0.123456', 'type': 'float', 'op': 'le'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '100.123456', 'type': 'float', 'op': 'ne'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '200.123456', 'type': 'float', 'op': 'gt'}]) self.assertEqual(0, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_C', 'value': '0.123456', 'type': 'float', 'op': 'ge'}]) self.assertEqual(3, len(data)) def test_get_events_filter_op_datatime(self): data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value': '2014-01-01T05:00:00', 'type': 'datetime', 'op': 'eq'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value': '2014-01-02T05:00:00', 'type': 'datetime', 'op': 'lt'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value': '2013-12-31T05:00:00', 'type': 'datetime', 'op': 'le'}]) self.assertEqual(1, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value': '2014-01-01T05:00:00', 'type': 'datetime', 'op': 'ne'}]) self.assertEqual(2, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value': '2014-01-02T05:00:00', 'type': 'datetime', 'op': 'gt'}]) self.assertEqual(0, len(data)) data = self.get_json(self.PATH, headers=HEADERS, q=[{'field': 'trait_D', 'value': '2013-12-31T05:00:00', 'type': 'datetime', 'op': 'ge'}]) self.assertEqual(3, len(data)) def test_get_events_filter_wrong_op(self): 
self.assertRaises(webtest.app.AppError, self.get_json, self.PATH, headers=HEADERS, q=[{'field': 'trait_B', 'value': '1', 'type': 'integer', 'op': 'el'}]) class AclRestrictedEventTestBase(v2.FunctionalTest): def setUp(self): super(AclRestrictedEventTestBase, self).setUp() self.admin_user_id = uuidutils.generate_uuid(dashed=False) self.admin_proj_id = uuidutils.generate_uuid(dashed=False) self.user_id = uuidutils.generate_uuid(dashed=False) self.proj_id = uuidutils.generate_uuid(dashed=False) self._generate_models() def _generate_models(self): event_models = [] self.s_time = datetime.datetime(2013, 12, 31, 5, 0) event_models.append( models.Event(message_id='1', event_type='empty_ev', generated=self.s_time, traits=[models.Trait('random', models.Trait.TEXT_TYPE, 'blah')], raw={})) event_models.append( models.Event(message_id='2', event_type='admin_ev', generated=self.s_time, traits=[models.Trait('project_id', models.Trait.TEXT_TYPE, self.admin_proj_id), models.Trait('user_id', models.Trait.TEXT_TYPE, self.admin_user_id)], raw={})) event_models.append( models.Event(message_id='3', event_type='user_ev', generated=self.s_time, traits=[models.Trait('project_id', models.Trait.TEXT_TYPE, self.proj_id), models.Trait('user_id', models.Trait.TEXT_TYPE, self.user_id)], raw={})) self.conn.record_events(event_models) def test_non_admin_access(self): a_headers = {"X-Roles": "member", "X-User-Id": self.user_id, "X-Project-Id": self.proj_id} data = self.get_json('/events', headers=a_headers) self.assertEqual(1, len(data)) self.assertEqual('user_ev', data[0]['event_type']) def test_non_admin_access_single(self): a_headers = {"X-Roles": "member", "X-User-Id": self.user_id, "X-Project-Id": self.proj_id} data = self.get_json('/events/3', headers=a_headers) self.assertEqual('user_ev', data['event_type']) def test_non_admin_access_incorrect_user(self): a_headers = {"X-Roles": "member", "X-User-Id": 'blah', "X-Project-Id": self.proj_id} data = self.get_json('/events', headers=a_headers) self.assertEqual(0, len(data)) def test_non_admin_access_incorrect_proj(self): a_headers = {"X-Roles": "member", "X-User-Id": self.user_id, "X-Project-Id": 'blah'} data = self.get_json('/events', headers=a_headers) self.assertEqual(0, len(data)) def test_non_admin_access_single_invalid(self): a_headers = {"X-Roles": "member", "X-User-Id": self.user_id, "X-Project-Id": self.proj_id} data = self.get_json('/events/1', headers=a_headers, expect_errors=True) self.assertEqual(404, data.status_int) @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es') def test_admin_access(self): a_headers = {"X-Roles": "admin", "X-User-Id": self.admin_user_id, "X-Project-Id": self.admin_proj_id} data = self.get_json('/events', headers=a_headers) self.assertEqual(2, len(data)) self.assertEqual(set(['empty_ev', 'admin_ev']), set(ev['event_type'] for ev in data)) @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es') def test_admin_access_trait_filter(self): a_headers = {"X-Roles": "admin", "X-User-Id": self.admin_user_id, "X-Project-Id": self.admin_proj_id} data = self.get_json('/events', headers=a_headers, q=[{'field': 'random', 'value': 'blah', 'type': 'string', 'op': 'eq'}]) self.assertEqual(1, len(data)) self.assertEqual('empty_ev', data[0]['event_type']) @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es') def test_admin_access_single(self): a_headers = {"X-Roles": "admin", "X-User-Id": self.admin_user_id, "X-Project-Id": self.admin_proj_id} data = self.get_json('/events/1', headers=a_headers) self.assertEqual('empty_ev', 
data['event_type']) data = self.get_json('/events/2', headers=a_headers) self.assertEqual('admin_ev', data['event_type']) @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es') def test_admin_access_all(self): a_headers = {"X-Roles": "admin", "X-User-Id": self.admin_user_id, "X-Project-Id": self.admin_proj_id} data = self.get_json('/events', headers=a_headers, q=[{'field': 'all_tenants', 'value': 'True', 'type': 'string', 'op': 'eq'}]) self.assertEqual(3, len(data)) self.assertEqual(set(['empty_ev', 'admin_ev', 'user_ev']), set(ev['event_type'] for ev in data)) @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb', 'es') def test_admin_access_trait_filter_no_access(self): a_headers = {"X-Roles": "admin", "X-User-Id": self.admin_user_id, "X-Project-Id": self.admin_proj_id} data = self.get_json('/events', headers=a_headers, q=[{'field': 'user_id', 'value': self.user_id, 'type': 'string', 'op': 'eq'}]) self.assertEqual(0, len(data)) class EventRestrictionTestBase(v2.FunctionalTest): def setUp(self): super(EventRestrictionTestBase, self).setUp() self.CONF.set_override('default_api_return_limit', 10, group='api') self._generate_models() def _generate_models(self): event_models = [] base = 0 self.s_time = datetime.datetime(2013, 12, 31, 5, 0) self.trait_time = datetime.datetime(2013, 12, 31, 5, 0) for i in range(20): trait_models = [models.Trait(name, type, value) for name, type, value in [ ('trait_A', models.Trait.TEXT_TYPE, "my_text"), ('trait_B', models.Trait.INT_TYPE, base + 1), ('trait_C', models.Trait.FLOAT_TYPE, float(base) + 0.123456), ('trait_D', models.Trait.DATETIME_TYPE, self.trait_time)]] event_models.append( models.Event(message_id=uuidutils.generate_uuid(), event_type='foo.bar', generated=self.trait_time, traits=trait_models, raw={'status': {'nested': 'started'}})) self.trait_time += datetime.timedelta(seconds=1) self.conn.record_events(event_models) class TestEventRestriction(EventRestrictionTestBase): def test_get_limit(self): data = self.get_json('/events?limit=1', headers=HEADERS) self.assertEqual(1, len(data)) def test_get_limit_negative(self): self.assertRaises(webtest.app.AppError, self.get_json, '/events?limit=-2', headers=HEADERS) def test_get_limit_bigger(self): data = self.get_json('/events?limit=100', headers=HEADERS) self.assertEqual(20, len(data)) def test_get_default_limit(self): data = self.get_json('/events', headers=HEADERS) self.assertEqual(10, len(data)) @tests_db.run_with('mysql', 'pgsql', 'sqlite', 'postgresql') class TestEventSort(EventTestBase): PATH = '/events' def test_get_limit_decr(self): data = self.get_json( '/events?limit=3&sort=generated:desc&sort=message_id', headers=HEADERS) self.assertEqual(3, len(data)) # check that data is sorted in most recent order # self.s_time - start (earliest) # self.trait_time - end (latest) trait_time = self.trait_time for event in data: trait_time -= datetime.timedelta(days=1) expected_generated = trait_time.isoformat() self.assertEqual(expected_generated, event['generated']) def test_get_limit_incr(self): data = self.get_json( '/events?limit=3&sort=generated:asc&sort=message_id', headers=HEADERS) self.assertEqual(3, len(data)) # check that data is sorted in decr order # self.s_time - start (earliest) # self.trait_time - end (latest) trait_time = self.s_time for event in data: expected_generated = trait_time.isoformat() self.assertEqual(expected_generated, event['generated']) trait_time += datetime.timedelta(days=1) def test_invalid_sort_key(self): resp = self.get_json('/events?sort=invalid_key:desc', 
headers=HEADERS, expect_errors=True) self.assertEqual(resp.status_code, 400) self.assertEqual("Invalid input for field/attribute sort. Value: " "'invalid_key:desc'. the sort parameter should be" " a pair of sort key and sort dir combined with " "':', or only sort key specified and sort dir will " "be default 'asc', the supported sort keys are: " "('message_id', 'generated')", resp.json['error_message'] ['faultstring']) def test_invalid_sort_dir(self): resp = self.get_json('/events?sort=message_id:bah', headers=HEADERS, expect_errors=True) self.assertEqual(resp.status_code, 400) self.assertEqual("Invalid input for field/attribute sort direction. " "Value: 'message_id:bah'. the sort parameter " "should be a pair of sort key and sort dir combined " "with ':', or only sort key specified and sort dir " "will be default 'asc', the supported sort " "directions are: ('asc', 'desc')", resp.json['error_message'] ['faultstring']) def test_sort_message_id(self): data = self.get_json('/events?limit=3&sort=message_id:desc', headers=HEADERS) self.assertEqual(3, len(data)) result = [a['message_id'] for a in data] self.assertEqual(['200', '100', '0'], result) data = self.get_json('/events?limit=3&sort=message_id:asc', headers=HEADERS) self.assertEqual(3, len(data)) result = [a['message_id'] for a in data] self.assertEqual(['0', '100', '200'], result) def test_paginate_query(self): data1 = self.get_json( '/events?limit=1&sort=message_id:asc', headers=HEADERS) self.assertEqual(1, len(data1)) self.assertEqual('0', data1[0]['message_id']) data2 = self.get_json( '/events?limit=3&marker=%s&sort=message_id:asc' % data1[0]['message_id'], headers=HEADERS) self.assertEqual(2, len(data2)) result = [a['message_id'] for a in data2] self.assertEqual(['100', '200'], result) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4781728 panko-10.0.0/panko/tests/functional/gabbi/0000775000175000017500000000000000000000000020403 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/gabbi/__init__.py0000664000175000017500000000000000000000000022502 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/gabbi/fixtures.py0000664000175000017500000001163300000000000022632 0ustar00zuulzuul00000000000000# # Copyright 2015 Red Hat. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Fixtures used during Gabbi-based test runs.""" import datetime import os from unittest import case from urllib import parse as urlparse from gabbi import fixture from oslo_config import cfg from oslo_policy import opts from oslo_utils import fileutils from oslo_utils import uuidutils import sqlalchemy_utils from panko.api import app from panko import service from panko import storage from panko.storage import models # NOTE(chdent): Hack to restore semblance of global configuration to # pass to the WSGI app used per test suite. LOAD_APP_KWARGS are the olso # configuration, and the pecan application configuration of # which the critical part is a reference to the current indexer. LOAD_APP_KWARGS = None def setup_app(): global LOAD_APP_KWARGS return app.load_app(**LOAD_APP_KWARGS) class ConfigFixture(fixture.GabbiFixture): """Establish the relevant configuration for a test run.""" def start_fixture(self): """Set up config.""" global LOAD_APP_KWARGS self.conf = None # Determine the database connection. db_url = os.environ.get('PIFPAF_URL', "sqlite://").replace( "mysql://", "mysql+pymysql://") if not db_url: raise case.SkipTest('No database connection configured') conf = self.conf = service.prepare_service([], []) opts.set_defaults(self.conf) content = ('{"default": ""}') content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='policy', suffix='.json') conf.set_override("policy_file", self.tempfile, group='oslo_policy') conf.set_override( 'api_paste_config', os.path.abspath('etc/panko/api_paste.ini') ) parsed_url = list(urlparse.urlparse(db_url)) parsed_url[2] += '-%s' % uuidutils.generate_uuid(dashed=False) db_url = urlparse.urlunparse(parsed_url) conf.set_override('connection', db_url, group='database') if (parsed_url[0].startswith("mysql") or parsed_url[0].startswith("postgresql")): sqlalchemy_utils.create_database(conf.database.connection) self.conn = storage.get_connection_from_config(self.conf) self.conn.upgrade() LOAD_APP_KWARGS = { 'conf': conf, 'appname': 'panko+noauth', } def stop_fixture(self): """Reset the config and remove data.""" if self.conn: self.conn.clear() if self.conf: storage.get_connection_from_config(self.conf).clear() class EventDataFixture(ConfigFixture): """Instantiate some sample event data for use in testing.""" def start_fixture(self): """Create some events.""" super(EventDataFixture, self).start_fixture() events = [] name_list = ['chocolate.chip', 'peanut.butter', 'sugar'] for ix, name in enumerate(name_list): timestamp = datetime.datetime.utcnow() message_id = 'fea1b15a-1d47-4175-85a5-a4bb2c72924{}'.format(ix) traits = [models.Trait('type', 1, name), models.Trait('ate', 2, ix)] event = models.Event(message_id, 'cookies_{}'.format(name), timestamp, traits, {'nested': {'inside': 'value'}}) events.append(event) self.conn.record_events(events) class CORSConfigFixture(fixture.GabbiFixture): """Inject mock configuration for the CORS middleware.""" def start_fixture(self): # Here we monkeypatch GroupAttr.__getattr__, necessary because the # paste.ini method of initializing this middleware creates its own # ConfigOpts instance, bypassing the regular config fixture. 
def _mock_getattr(instance, key): if key != 'allowed_origin': return self._original_call_method(instance, key) return "http://valid.example.com" self._original_call_method = cfg.ConfigOpts.GroupAttr.__getattr__ cfg.ConfigOpts.GroupAttr.__getattr__ = _mock_getattr def stop_fixture(self): """Remove the monkeypatch.""" cfg.ConfigOpts.GroupAttr.__getattr__ = self._original_call_method ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4821727 panko-10.0.0/panko/tests/functional/gabbi/gabbits/0000775000175000017500000000000000000000000022016 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/gabbi/gabbits/api-events-no-data.yaml0000664000175000017500000001365000000000000026303 0ustar00zuulzuul00000000000000# These test run against the Events API with no data preloaded into the # datastore. This allows us to verify that requests are still processed # normally even if data is missing for that endpoint. fixtures: - ConfigFixture tests: # this attempts to get all the events and expects an empty list back - name: get all events GET: /v2/events request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json response_strings: - "[]" # this attempts to get all the events with no role/user/project # info in header and expects a 403 - name: get events with bad headers GET: /v2/events status: 403 # this attempts to get all the events with no user/project # info in header and expects a 403 - name: get events with admin only header GET: /v2/events request_headers: X-Roles: admin status: 403 # this attempts to get all the events with no project # info in header and expects a 403 - name: get events with no project header GET: /v2/events request_headers: X-Roles: admin X-User-Id: user1 status: 403 # this attempts to get all the events with no user # info in header and expects a 403 - name: get events with no user header GET: /v2/events request_headers: X-Roles: admin X-Project-Id: project1 status: 403 # this attempts to get all the events with invalid parameters and expects a 400 - name: get events with bad params GET: /v2/events?bad_Stuff_here request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 400 # this attempts to query the events with the correct parameterized query syntax # and expects an empty list - name: get events that match query GET: /v2/events?q.field=event_type&q.op=eq&q.type=string&q.value=cookies_chocolate.chip request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json response_strings: - "[]" # this attempts to query the events with the correct data query syntax and # expects an empty list - name: get events that match query via request data GET: /v2/events request_headers: content-type: application/json X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: q: - field: event_type op: eq type: string value: cookies_chocolate.chip response_headers: content-type: application/json response_strings: - "[]" # this attempts to query the events with the correct parameterized query syntax # but a bad field name and expects an empty list - name: get events that match bad query GET: /v2/events?q.field=bad_field&q.op=eq&q.type=string&q.value=cookies_chocolate.chip request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: 
application/json response_strings: - "[]" # this attempts to query the events with the correct data query syntax and # a bad field name and expects an empty list - name: get events that match bad query via request data GET: /v2/events request_headers: content-type: application/json X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: q: - field: bad_field op: eq type: string value: cookies_chocolate.chip response_headers: content-type: application/json response_strings: - "[]" # this attempts to query the events with the wrong data query syntax missing the # q object but supplying the field list and a bad field name and expects a 400 - name: get events that match bad query via request data malformed list GET: /v2/events request_headers: content-type: application/json X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: - field: bad_field op: eq type: string value: cookies_chocolate.chip status: 400 # this attempts to query the events with the wrong data query syntax missing the # q object but supplying the field list along with a bad content-type. Should # return a 400 - name: get events that match bad query via request data wrong type GET: /v2/events request_headers: content-type: text/plain X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: "field: bad_field op: eq type: string value: cookies_chocolate.chip xfail: True" status: 415 # Get a single event by message_id no data is present so should return a 404 - name: get a single event GET: /v2/events/fea1b15a-1d47-4175-85a5-a4bb2c729240 request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 404 # Get all the event types should return an empty list - name: get all event types GET: /v2/event_types request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json response_strings: - "[]" # Get a single event type by name, this API is unused and should return a 404 - name: get event types for good event_type unused api GET: /v2/event_types/cookies_chocolate.chip request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 404 # Get all traits for an event type should return an empty list - name: get all traits for event type GET: /v2/event_types/cookies_chocolate.chip/traits request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json response_strings: - "[]" # Get all traits named ate for an event type should return an empty list - name: get all traits named ate for event type GET: /v2/event_types/cookies_chocolate.chip/traits/ate request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json response_strings: - "[]" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/gabbi/gabbits/api-events-with-data.yaml0000664000175000017500000001656100000000000026646 0ustar00zuulzuul00000000000000# These test run against the Events API with data preloaded into the datastore. 
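# For reference, the EventDataFixture records three events
# (cookies_chocolate.chip, cookies_peanut.butter and cookies_sugar), each with a
# string 'type' trait, an integer 'ate' trait and raw {'nested': {'inside': 'value'}}.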
fixtures: - ConfigFixture - EventDataFixture tests: # this attempts to get all the events and checks to make sure they are valid - name: get all events GET: /v2/events request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json verbose: True response_json_paths: $[/event_type].[0].event_type: cookies_chocolate.chip $[/event_type].[0].traits[/value].[0].value: '0' $[/event_type].[0].traits[/value].[1].value: chocolate.chip $[/event_type].[0].raw.nested.inside: value $[/event_type].[1].event_type: cookies_peanut.butter $[/event_type].[1].traits[/name].[0].name: ate $[/event_type].[1].traits[/name].[1].name: type $[/event_type].[1].raw.nested.inside: value $[/event_type].[2].event_type: cookies_sugar $[/event_type].[2].traits[/type].[0].type: integer $[/event_type].[2].traits[/type].[1].type: string $[/event_type].[2].raw.nested.inside: value # this attempts to get all the events with invalid parameters and expects a 400 - name: get events with bad params GET: /v2/events?bad_Stuff_here request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 400 # this attempts to query the events with the correct parameterized query syntax # and expects a matching event - name: get events that match query GET: /v2/events?q.field=event_type&q.op=eq&q.type=string&q.value=cookies_chocolate.chip request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json response_json_paths: $[/event_type].[0].event_type: cookies_chocolate.chip $[/event_type].[0].traits[/value].[1].value: chocolate.chip # this attempts to query the events with the correct data query syntax and # expects a matching event - name: get events that match query via data GET: /v2/events request_headers: content-type: application/json X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: q: - field: event_type op: eq type: string value: cookies_chocolate.chip response_headers: content-type: application/json response_json_paths: $[/event_type].[0].event_type: cookies_chocolate.chip $[/event_type].[0].traits[/value].[1].value: chocolate.chip # this attempts to query the events with the correct parameterized query syntax # but a bad field name and expects an empty list - name: get events that match bad query GET: /v2/events?q.field=bad_field&q.op=eq&q.type=string&q.value=cookies_chocolate.chip request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json response_strings: - "[]" # this attempts to query the events with the correct data query syntax and # a bad field name and expects an empty list - name: get events that match bad query via data GET: /v2/events request_headers: content-type: application/json X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: q: - field: bad_field op: eq type: string value: cookies_chocolate.chip response_headers: content-type: application/json response_strings: - "[]" # this attempts to query the events with the wrong data query syntax missing the # q object but supplying the field list and a bad field name and expects a 400 - name: get events that match bad query via data list GET: /v2/events request_headers: content-type: application/json X-Roles: admin X-User-Id: user1 X-Project-Id: project1 data: - field: bad_field op: eq type: string value: cookies_chocolate.chip status: 400 # Get a single event by message_id should return an event - name: get a single event GET: 
/v2/events/fea1b15a-1d47-4175-85a5-a4bb2c729240 request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json response_json_paths: $.event_type: cookies_chocolate.chip $.traits[/value].[0].value: '0' $.traits[/value].[1].value: chocolate.chip # Get a single event by message_id no data is present so should return a 404 - name: get a single event that does not exist GET: /v2/events/bad-id request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 404 # Get all the event types should return a list of event types - name: get all event types GET: /v2/event_types request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json response_strings: - cookies_chocolate.chip - cookies_peanut.butter - cookies_sugar # Get a single event type by valid name, this API is unused and should return a 404 - name: get event types for good event_type unused api GET: /v2/event_types/cookies_chocolate.chip request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 404 # Get a single event type by invalid name, this API is unused and should return a 404 - name: get event types for bad event_type unused api GET: /v2/event_types/bad_event_type request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 status: 404 # Get all traits for a valid event type should return an list of traits - name: get all traits for event type GET: /v2/event_types/cookies_chocolate.chip/traits request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json response_json_paths: $.[0].type: string $.[1].name: ate # Get all traits for an invalid event type should return an empty list - name: get all traits names for event type bad event type GET: /v2/event_types/bad_event_type/traits request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json response_strings: - "[]" # Get all traits of type ate for a valid event type should return an list of # traits - name: get all traits of type ate for event type GET: /v2/event_types/cookies_chocolate.chip/traits/ate request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json response_json_paths: $.[0].name: ate $.[0].value: '0' # Get all traits of type ate for an invalid event type should return an empty # list - name: get all traits of type for event type bad event type GET: /v2/event_types/bad_event_type/traits/ate request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json response_strings: - "[]" # Get all traits of type bad_trait_name for a valid event type should return an # empty list - name: get all traits of type instances for event type bad trait name GET: /v2/event_types/cookies_chocolate.chip/traits/bad_trait_name request_headers: X-Roles: admin X-User-Id: user1 X-Project-Id: project1 response_headers: content-type: application/json response_strings: - "[]" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/gabbi/gabbits/basic.yaml0000664000175000017500000000126000000000000023762 0ustar00zuulzuul00000000000000# # Some simple tests just to confirm that the system works. # fixtures: - ConfigFixture tests: # Root gives us some information on where to go from here. 
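# Based on the assertions below, the root body looks roughly like this sketch
# (not verbatim output): {"versions": {"values": [{"status": "stable",
# "media-types": [{"base": "application/json"}], "links": [{"href": "<base>/v2"}]}]}}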
- name: quick root check GET: / response_headers: content-type: application/json response_strings: - '"base": "application/json"' response_json_paths: versions.values.[0].status: stable versions.values.[0].media-types.[0].base: application/json # NOTE(chdent): Ideally since / has a links ref to /v2, /v2 ought not 404! - name: v2 visit desc: this demonstrates a bug in the info in / GET: $RESPONSE['versions.values.[0].links.[0].href'] status: 404 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/gabbi/gabbits/capabilities.yaml0000664000175000017500000000037500000000000025340 0ustar00zuulzuul00000000000000# # Explore the capabilities API # fixtures: - ConfigFixture tests: - name: get capabilities desc: retrieve capabilities for the mongo store GET: /v2/capabilities response_json_paths: $.event_storage.['storage:production_ready']: true ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/gabbi/gabbits/middleware.yaml0000664000175000017500000000210500000000000025015 0ustar00zuulzuul00000000000000# # Test the middlewares. Just CORS for now. # fixtures: - ConfigFixture - CORSConfigFixture tests: - name: valid cors options OPTIONS: / status: 200 request_headers: origin: http://valid.example.com access-control-request-method: GET response_headers: access-control-allow-origin: http://valid.example.com - name: invalid cors options OPTIONS: / status: 200 request_headers: origin: http://invalid.example.com access-control-request-method: GET response_forbidden_headers: - access-control-allow-origin - name: valid cors get GET: / status: 200 request_headers: origin: http://valid.example.com access-control-request-method: GET response_headers: access-control-allow-origin: http://valid.example.com - name: invalid cors get GET: / status: 200 request_headers: origin: http://invalid.example.com response_forbidden_headers: - access-control-allow-origin ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4821727 panko-10.0.0/panko/tests/functional/gabbi/gabbits_prefix/0000775000175000017500000000000000000000000023373 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/gabbi/gabbits_prefix/basic.yaml0000664000175000017500000000100300000000000025332 0ustar00zuulzuul00000000000000# # Confirm root reports the right data including a prefixed URL # fixtures: - ConfigFixture tests: # Root gives us some information on where to go from here. - name: quick root check GET: / response_headers: content-type: application/json response_strings: - '"base": "application/json"' response_json_paths: versions.values.[0].status: stable versions.values.[0].media-types.[0].base: application/json response_strings: - /telemetry/ ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/gabbi/test_gabbi.py0000664000175000017500000000217300000000000023063 0ustar00zuulzuul00000000000000# # Copyright 2015 Red Hat. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A test module to exercise the Panko API with gabbi For the sake of exploratory development. """ import os from gabbi import driver from panko.tests.functional.gabbi import fixtures TESTS_DIR = 'gabbits' def load_tests(loader, tests, pattern): """Provide a TestSuite to the discovery process.""" test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) return driver.build_tests(test_dir, loader, host=None, intercept=fixtures.setup_app, fixture_module=fixtures) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/gabbi/test_gabbi_prefix.py0000664000175000017500000000222600000000000024437 0ustar00zuulzuul00000000000000# # Copyright 2015 Red Hat. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A module to exercise the Panko API with gabbi with a URL prefix""" import os from gabbi import driver from panko.tests.functional.gabbi import fixtures TESTS_DIR = 'gabbits_prefix' def load_tests(loader, tests, pattern): """Provide a TestSuite to the discovery process.""" test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) return driver.build_tests(test_dir, loader, host=None, prefix='/telemetry', intercept=fixtures.setup_app, fixture_module=fixtures) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4821727 panko-10.0.0/panko/tests/functional/hooks/0000775000175000017500000000000000000000000020462 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/hooks/post_test_hook.sh0000775000175000017500000000336600000000000024075 0ustar00zuulzuul00000000000000#!/bin/bash -xe # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This script is executed inside post_test_hook function in devstack gate. 
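# Hypothetical invocation from the gate hook (the backend name is read from $1 below):
#     ./post_test_hook.sh mongodb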
function generate_testr_results { if [ -f .testrepository/0 ]; then sudo .tox/functional/bin/testr last --subunit > $WORKSPACE/testrepository.subunit sudo mv $WORKSPACE/testrepository.subunit $BASE/logs/testrepository.subunit sudo /usr/os-testr-env/bin/subunit2html $BASE/logs/testrepository.subunit $BASE/logs/testr_results.html sudo gzip -9 $BASE/logs/testrepository.subunit sudo gzip -9 $BASE/logs/testr_results.html sudo chown $USER:$USER $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz sudo chmod a+r $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz fi } export PANKO_DIR="$BASE/new/panko" # Go to the panko dir cd $PANKO_DIR if [[ -z "$STACK_USER" ]]; then export STACK_USER=stack fi sudo chown -R $STACK_USER:stack $PANKO_DIR # Run tests echo "Running panko functional test suite" set +e # NOTE(ityaptin) Expected a script param which contains a backend name PANKO_TEST_BACKEND="$1" sudo -E -H -u ${STACK_USER:-${USER}} tox -efunctional EXIT_CODE=$? set -e # Collect and parse result generate_testr_results exit $EXIT_CODE ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4821727 panko-10.0.0/panko/tests/functional/publisher/0000775000175000017500000000000000000000000021334 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/publisher/__init__.py0000664000175000017500000000000000000000000023433 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4821727 panko-10.0.0/panko/tests/functional/storage/0000775000175000017500000000000000000000000021003 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/storage/__init__.py0000664000175000017500000000000000000000000023102 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/storage/test_impl_mongodb.py0000664000175000017500000000434700000000000025072 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for panko/storage/impl_mongodb.py .. note:: In order to run the tests against another MongoDB server set the environment variable PANKO_TEST_MONGODB_URL to point to a MongoDB server before running the tests. 
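For example (hypothetical server URL):
export PANKO_TEST_MONGODB_URL=mongodb://localhost:27017/panko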
""" from panko.storage import impl_mongodb from panko.tests import base as test_base from panko.tests import db as tests_db @tests_db.run_with('mongodb') class IndexTest(tests_db.TestBase): def test_event_ttl_index_absent(self): # create a fake index and check it is deleted self.conn.clear_expired_data(-1, 0) self.assertNotIn("event_ttl", self.conn.db.event.index_information()) self.conn.clear_expired_data(456789, 0) self.assertEqual(456789, self.conn.db.event.index_information() ["event_ttl"]['expireAfterSeconds']) def test_event_ttl_index_present(self): self.conn.clear_expired_data(456789, 0) self.assertEqual(456789, self.conn.db.event.index_information() ["event_ttl"]['expireAfterSeconds']) self.conn.clear_expired_data(-1, 0) self.assertNotIn("event_ttl", self.conn.db.event.index_information()) class CapabilitiesTest(test_base.BaseTestCase): # Check the returned capabilities list, which is specific to each DB # driver def test_capabilities(self): expected_capabilities = { 'events': {'query': {'simple': True}}, } actual_capabilities = impl_mongodb.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/storage/test_impl_sqlalchemy.py0000664000175000017500000001040700000000000025601 0ustar00zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for panko/storage/impl_sqlalchemy.py .. note:: In order to run the tests against real SQL server set the environment variable PANKO_TEST_SQL_URL to point to a SQL server before running the tests. """ import datetime import reprlib from panko.storage import impl_sqlalchemy as impl_sqla_event from panko.storage import models from panko.storage.sqlalchemy import models as sql_models from panko.tests import base as test_base from panko.tests import db as tests_db @tests_db.run_with('sqlite', 'mysql', 'pgsql') class PankoBaseTest(tests_db.TestBase): def test_panko_base(self): base = sql_models.PankoBase() base['key'] = 'value' self.assertEqual('value', base['key']) @tests_db.run_with('sqlite', 'mysql', 'pgsql') class EventTypeTest(tests_db.TestBase): # EventType is a construct specific to sqlalchemy # Not applicable to other drivers. 
def setUp(self): super(EventTypeTest, self).setUp() self.session = self.conn._engine_facade.get_session() self.session.begin() def test_event_type_exists(self): et1 = self.conn._get_or_create_event_type("foo", self.session) self.assertTrue(et1.id >= 0) et2 = self.conn._get_or_create_event_type("foo", self.session) self.assertEqual(et2.id, et1.id) self.assertEqual(et2.desc, et1.desc) def test_event_type_unique(self): et1 = self.conn._get_or_create_event_type("foo", self.session) self.assertTrue(et1.id >= 0) et2 = self.conn._get_or_create_event_type("blah", self.session) self.assertNotEqual(et1.id, et2.id) self.assertNotEqual(et1.desc, et2.desc) # Test the method __repr__ returns a string self.assertTrue(reprlib.repr(et2)) def tearDown(self): self.session.rollback() self.session.close() super(EventTypeTest, self).tearDown() @tests_db.run_with('sqlite', 'mysql', 'pgsql') class EventTest(tests_db.TestBase): def _verify_data(self, trait, trait_table): now = datetime.datetime.utcnow() ev = models.Event('1', 'name', now, [trait], {}) self.conn.record_events([ev]) session = self.conn._engine_facade.get_session() t_tables = [sql_models.TraitText, sql_models.TraitFloat, sql_models.TraitInt, sql_models.TraitDatetime] for table in t_tables: if table == trait_table: self.assertEqual(1, session.query(table).count()) else: self.assertEqual(0, session.query(table).count()) def test_string_traits(self): model = models.Trait("Foo", models.Trait.TEXT_TYPE, "my_text") self._verify_data(model, sql_models.TraitText) def test_int_traits(self): model = models.Trait("Foo", models.Trait.INT_TYPE, 100) self._verify_data(model, sql_models.TraitInt) def test_float_traits(self): model = models.Trait("Foo", models.Trait.FLOAT_TYPE, 123.456) self._verify_data(model, sql_models.TraitFloat) def test_datetime_traits(self): now = datetime.datetime.utcnow() model = models.Trait("Foo", models.Trait.DATETIME_TYPE, now) self._verify_data(model, sql_models.TraitDatetime) def test_event_repr(self): ev = sql_models.Event('msg_id', None, False, {}) ev.id = 100 self.assertTrue(reprlib.repr(ev)) class CapabilitiesTest(test_base.BaseTestCase): # Check the returned capabilities list, which is specific to each DB # driver def test_capabilities(self): expected_capabilities = { 'events': {'query': {'simple': True}}, } actual_capabilities = impl_sqla_event.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/storage/test_storage_scenarios.py0000664000175000017500000005404400000000000026135 0ustar00zuulzuul00000000000000# # Copyright 2013 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Base classes for DB backend implementation test""" import datetime import operator from unittest import mock from oslo_utils import timeutils from panko import storage from panko.storage import models from panko.tests import db as tests_db class EventTestBase(tests_db.TestBase): """Separate test base class. We don't want to inherit all the Meter stuff. """ def setUp(self): super(EventTestBase, self).setUp() self.prepare_data() def prepare_data(self): self.models = [] base = 0 self.start = datetime.datetime(2013, 12, 31, 5, 0) now = self.start for event_type in ['Foo', 'Bar', 'Zoo', 'Foo', 'Bar', 'Zoo']: trait_models = [models.Trait(name, dtype, value) for name, dtype, value in [ ('trait_A', models.Trait.TEXT_TYPE, "my_%s_text" % event_type), ('trait_B', models.Trait.INT_TYPE, base + 1), ('trait_C', models.Trait.FLOAT_TYPE, float(base) + 0.123456), ('trait_D', models.Trait.DATETIME_TYPE, now)]] self.models.append( models.Event("id_%s_%d" % (event_type, base), event_type, now, trait_models, {'status': {'nested': 'started'}})) base += 100 now = now + datetime.timedelta(hours=1) self.end = now self.conn.record_events(self.models) @tests_db.run_with('sqlite', 'mysql', 'pgsql') class EventTTLTest(EventTestBase): @mock.patch.object(timeutils, 'utcnow') def test_clear_expired_data(self, mock_utcnow): mock_utcnow.return_value = datetime.datetime(2013, 12, 31, 10, 0) self.conn.clear_expired_data(3600, 100) events = list(self.conn.get_events(storage.EventFilter())) self.assertEqual(2, len(events)) event_types = list(self.conn.get_event_types()) self.assertEqual(['Bar', 'Zoo'], event_types) for event_type in event_types: trait_types = list(self.conn.get_trait_types(event_type)) self.assertEqual(4, len(trait_types)) traits = list(self.conn.get_traits(event_type)) self.assertEqual(4, len(traits)) @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'mongodb') class EventTest(EventTestBase): def test_duplicate_message_id(self): now = datetime.datetime.utcnow() m = [models.Event("1", "Foo", now, None, {}), models.Event("1", "Zoo", now, [], {})] with mock.patch('%s.LOG' % self.conn.record_events.__module__) as log: self.conn.record_events(m) self.assertEqual(1, log.debug.call_count) def test_bad_event(self): now = datetime.datetime.utcnow() broken_event = models.Event("1", "Foo", now, None, {}) del(broken_event.__dict__['raw']) m = [broken_event, broken_event] with mock.patch('%s.LOG' % self.conn.record_events.__module__) as log: self.assertRaises(AttributeError, self.conn.record_events, m) # ensure that record_events does not break on first error but # delays exception and tries to record each event. 
self.assertEqual(2, log.exception.call_count) class BigIntegerTest(EventTestBase): def test_trait_bigint(self): big = 99999999999999 new_events = [models.Event( "id_testid", "MessageIDTest", self.start, [models.Trait('int', models.Trait.INT_TYPE, big)], {})] self.conn.record_events(new_events) class GetEventTest(EventTestBase): def test_generated_is_datetime(self): event_filter = storage.EventFilter(self.start, self.end) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(6, len(events)) for i, event in enumerate(events): self.assertIsInstance(event.generated, datetime.datetime) self.assertEqual(event.generated, self.models[i].generated) model_traits = self.models[i].traits for j, trait in enumerate(event.traits): if trait.dtype == models.Trait.DATETIME_TYPE: self.assertIsInstance(trait.value, datetime.datetime) self.assertEqual(trait.value, model_traits[j].value) def test_simple_get(self): event_filter = storage.EventFilter(self.start, self.end) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(6, len(events)) start_time = None for i, type in enumerate(['Foo', 'Bar', 'Zoo']): self.assertEqual(type, events[i].event_type) self.assertEqual(4, len(events[i].traits)) # Ensure sorted results ... if start_time is not None: # Python 2.6 has no assertLess :( self.assertTrue(start_time < events[i].generated) start_time = events[i].generated def test_simple_get_event_type(self): expected_trait_values = { 'id_Bar_100': { 'trait_A': 'my_Bar_text', 'trait_B': 101, 'trait_C': 100.123456, 'trait_D': self.start + datetime.timedelta(hours=1) }, 'id_Bar_400': { 'trait_A': 'my_Bar_text', 'trait_B': 401, 'trait_C': 400.123456, 'trait_D': self.start + datetime.timedelta(hours=4) } } event_filter = storage.EventFilter(self.start, self.end, "Bar") events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(2, len(events)) self.assertEqual("Bar", events[0].event_type) self.assertEqual("Bar", events[1].event_type) self.assertEqual(4, len(events[0].traits)) self.assertEqual(4, len(events[1].traits)) for event in events: trait_values = expected_trait_values.get(event.message_id, None) if not trait_values: self.fail("Unexpected event ID returned:" % event.message_id) for trait in event.traits: expected_val = trait_values.get(trait.name) if not expected_val: self.fail("Unexpected trait type: %s" % trait.dtype) self.assertEqual(expected_val, trait.value) def test_get_event_trait_filter(self): trait_filters = [{'key': 'trait_B', 'integer': 101}] event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("Bar", events[0].event_type) self.assertEqual(4, len(events[0].traits)) def test_get_event_trait_filter_op_string(self): trait_filters = [{'key': 'trait_A', 'string': 'my_Foo_text', 'op': 'eq'}] event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(2, len(events)) self.assertEqual("Foo", events[0].event_type) self.assertEqual(4, len(events[0].traits)) trait_filters[0].update({'key': 'trait_A', 'op': 'lt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(2, len(events)) self.assertEqual("Bar", events[0].event_type) trait_filters[0].update({'key': 'trait_A', 'op': 'le'}) 
event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(4, len(events)) self.assertEqual("Bar", events[1].event_type) trait_filters[0].update({'key': 'trait_A', 'op': 'ne'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(4, len(events)) self.assertEqual("Zoo", events[3].event_type) trait_filters[0].update({'key': 'trait_A', 'op': 'gt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(2, len(events)) self.assertEqual("Zoo", events[0].event_type) trait_filters[0].update({'key': 'trait_A', 'op': 'ge'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(4, len(events)) self.assertEqual("Foo", events[2].event_type) def test_get_event_trait_filter_op_integer(self): trait_filters = [{'key': 'trait_B', 'integer': 101, 'op': 'eq'}] event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("Bar", events[0].event_type) self.assertEqual(4, len(events[0].traits)) trait_filters[0].update({'key': 'trait_B', 'op': 'lt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("Foo", events[0].event_type) trait_filters[0].update({'key': 'trait_B', 'op': 'le'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(2, len(events)) self.assertEqual("Bar", events[1].event_type) trait_filters[0].update({'key': 'trait_B', 'op': 'ne'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(5, len(events)) self.assertEqual("Zoo", events[4].event_type) trait_filters[0].update({'key': 'trait_B', 'op': 'gt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(4, len(events)) self.assertEqual("Zoo", events[0].event_type) trait_filters[0].update({'key': 'trait_B', 'op': 'ge'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(5, len(events)) self.assertEqual("Foo", events[2].event_type) def test_get_event_trait_filter_op_float(self): trait_filters = [{'key': 'trait_C', 'float': 300.123456, 'op': 'eq'}] event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("Foo", events[0].event_type) self.assertEqual(4, len(events[0].traits)) trait_filters[0].update({'key': 'trait_C', 'op': 'lt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(3, len(events)) 
self.assertEqual("Zoo", events[2].event_type) trait_filters[0].update({'key': 'trait_C', 'op': 'le'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(4, len(events)) self.assertEqual("Bar", events[1].event_type) trait_filters[0].update({'key': 'trait_C', 'op': 'ne'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(5, len(events)) self.assertEqual("Zoo", events[2].event_type) trait_filters[0].update({'key': 'trait_C', 'op': 'gt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(2, len(events)) self.assertEqual("Bar", events[0].event_type) trait_filters[0].update({'key': 'trait_C', 'op': 'ge'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(3, len(events)) self.assertEqual("Zoo", events[2].event_type) def test_get_event_trait_filter_op_datetime(self): trait_filters = [{'key': 'trait_D', 'datetime': self.start + datetime.timedelta(hours=2), 'op': 'eq'}] event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("Zoo", events[0].event_type) self.assertEqual(4, len(events[0].traits)) trait_filters[0].update({'key': 'trait_D', 'op': 'lt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(2, len(events)) trait_filters[0].update({'key': 'trait_D', 'op': 'le'}) self.assertEqual("Bar", events[1].event_type) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(3, len(events)) self.assertEqual("Bar", events[1].event_type) trait_filters[0].update({'key': 'trait_D', 'op': 'ne'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(5, len(events)) self.assertEqual("Foo", events[2].event_type) trait_filters[0].update({'key': 'trait_D', 'op': 'gt'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(3, len(events)) self.assertEqual("Zoo", events[2].event_type) trait_filters[0].update({'key': 'trait_D', 'op': 'ge'}) event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(4, len(events)) self.assertEqual("Bar", events[2].event_type) def test_get_event_multiple_trait_filter(self): trait_filters = [{'key': 'trait_B', 'integer': 1}, {'key': 'trait_C', 'float': 0.123456}, {'key': 'trait_A', 'string': 'my_Foo_text'}] event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("Foo", events[0].event_type) self.assertEqual(4, len(events[0].traits)) def 
test_get_event_multiple_trait_filter_expect_none(self): trait_filters = [{'key': 'trait_B', 'integer': 1}, {'key': 'trait_A', 'string': 'my_Zoo_text'}] event_filter = storage.EventFilter(self.start, self.end, traits_filter=trait_filters) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(0, len(events)) def test_get_event_types(self): event_types = [e for e in self.conn.get_event_types()] self.assertEqual(3, len(event_types)) self.assertIn("Bar", event_types) self.assertIn("Foo", event_types) self.assertIn("Zoo", event_types) def test_get_trait_types(self): trait_types = [tt for tt in self.conn.get_trait_types("Foo")] self.assertEqual(4, len(trait_types)) trait_type_names = map(lambda x: x['name'], trait_types) self.assertIn("trait_A", trait_type_names) self.assertIn("trait_B", trait_type_names) self.assertIn("trait_C", trait_type_names) self.assertIn("trait_D", trait_type_names) def test_get_trait_types_unknown_event(self): trait_types = [tt for tt in self.conn.get_trait_types("Moo")] self.assertEqual(0, len(trait_types)) def test_get_traits(self): traits = self.conn.get_traits("Bar") # format results in a way that makes them easier to work with trait_dict = {} for trait in traits: trait_dict[trait.name] = trait.dtype self.assertIn("trait_A", trait_dict) self.assertEqual(models.Trait.TEXT_TYPE, trait_dict["trait_A"]) self.assertIn("trait_B", trait_dict) self.assertEqual(models.Trait.INT_TYPE, trait_dict["trait_B"]) self.assertIn("trait_C", trait_dict) self.assertEqual(models.Trait.FLOAT_TYPE, trait_dict["trait_C"]) self.assertIn("trait_D", trait_dict) self.assertEqual(models.Trait.DATETIME_TYPE, trait_dict["trait_D"]) def test_get_all_traits(self): traits = self.conn.get_traits("Foo") traits = sorted([t for t in traits], key=operator.attrgetter('dtype')) self.assertEqual(8, len(traits)) trait = traits[0] self.assertEqual("trait_A", trait.name) self.assertEqual(models.Trait.TEXT_TYPE, trait.dtype) def test_simple_get_event_no_traits(self): new_events = [models.Event("id_notraits", "NoTraits", self.start, [], {})] self.conn.record_events(new_events) event_filter = storage.EventFilter( self.start, self.end, "NoTraits") events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(1, len(events)) self.assertEqual("id_notraits", events[0].message_id) self.assertEqual("NoTraits", events[0].event_type) self.assertEqual(0, len(events[0].traits)) def test_simple_get_no_filters(self): event_filter = storage.EventFilter(None, None, None) events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(6, len(events)) def test_get_by_message_id(self): new_events = [models.Event("id_testid", "MessageIDTest", self.start, [], {})] self.conn.record_events(new_events) event_filter = storage.EventFilter(message_id="id_testid") events = [event for event in self.conn.get_events(event_filter)] self.assertEqual(1, len(events)) event = events[0] self.assertEqual("id_testid", event.message_id) def test_simple_get_raw(self): event_filter = storage.EventFilter() events = [event for event in self.conn.get_events(event_filter)] self.assertTrue(events) self.assertEqual({'status': {'nested': 'started'}}, events[0].raw) def test_trait_type_enforced_on_none(self): new_events = [models.Event( "id_testid", "MessageIDTest", self.start, [models.Trait('text', models.Trait.TEXT_TYPE, ''), models.Trait('int', models.Trait.INT_TYPE, 0), models.Trait('float', models.Trait.FLOAT_TYPE, 0.0)], {})] self.conn.record_events(new_events) event_filter = 
storage.EventFilter(message_id="id_testid") events = [event for event in self.conn.get_events(event_filter)] options = [(models.Trait.TEXT_TYPE, ''), (models.Trait.INT_TYPE, 0.0), (models.Trait.FLOAT_TYPE, 0.0)] for trait in events[0].traits: options.remove((trait.dtype, trait.value)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/functional/test_bin.py0000664000175000017500000000544200000000000021525 0ustar00zuulzuul00000000000000# Copyright 2012 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import subprocess from oslo_utils import fileutils from panko.tests import base class BinTestCase(base.BaseTestCase): def setUp(self): super(BinTestCase, self).setUp() content = ("[database]\n" "connection=log://localhost\n") content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='panko', suffix='.conf') def tearDown(self): super(BinTestCase, self).tearDown() os.remove(self.tempfile) def test_dbsync_run(self): subp = subprocess.Popen(['panko-dbsync', "--config-file=%s" % self.tempfile]) self.assertEqual(0, subp.wait()) def test_run_expirer_ttl_disabled(self): subp = subprocess.Popen(['panko-expirer', '-d', "--config-file=%s" % self.tempfile], stdout=subprocess.PIPE) out, __ = subp.communicate() self.assertEqual(0, subp.poll()) self.assertIn(b"Nothing to clean, database event " b"time to live is disabled", out) def _test_run_expirer_ttl_enabled(self, ttl_name, data_name): content = ("[database]\n" "%s=1\n" "connection=log://localhost\n" % ttl_name) content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='panko', suffix='.conf') subp = subprocess.Popen(['panko-expirer', '-d', "--config-file=%s" % self.tempfile], stdout=subprocess.PIPE) out, __ = subp.communicate() self.assertEqual(0, subp.poll()) msg = "Dropping 100 %ss data with TTL 1" % data_name msg = msg.encode('utf-8') self.assertIn(msg, out) def test_run_expirer_ttl_enabled(self): self._test_run_expirer_ttl_enabled('event_time_to_live', 'event') ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/mocks.py0000664000175000017500000000665600000000000016700 0ustar00zuulzuul00000000000000 # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
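# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test modules above): how the event
# storage API exercised by GetEventTest is typically driven.  It assumes a
# ready `conn` object obtained from panko.storage (for example via
# get_connection_from_config) and a backend that supports querying; the
# trait names and values below are made up for the example only.
# ---------------------------------------------------------------------------
import datetime

from panko import storage
from panko.storage import models


def record_and_query(conn):
    now = datetime.datetime.utcnow()
    event = models.Event(
        "example-message-id", "compute.instance.create.end", now,
        [models.Trait('instance_id', models.Trait.TEXT_TYPE, 'uuid-1234'),
         models.Trait('memory_mb', models.Trait.INT_TYPE, 512)],
        {'status': 'completed'})
    conn.record_events([event])

    # Filter on event type and an integer trait, mirroring the tests above.
    trait_filters = [{'key': 'memory_mb', 'integer': 512, 'op': 'eq'}]
    event_filter = storage.EventFilter(
        now - datetime.timedelta(minutes=1),
        now + datetime.timedelta(minutes=1),
        "compute.instance.create.end",
        traits_filter=trait_filters)
    return list(conn.get_events(event_filter))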
import os import happybase class MockHBaseTable(happybase.Table): def __init__(self, name, connection, data_prefix): # data_prefix is added to all rows which are written # in this test. It allows to divide data from different tests self.data_prefix = data_prefix # We create happybase Table with prefix from # PANKO_TEST_HBASE_TABLE_PREFIX prefix = os.getenv("PANKO_TEST_HBASE_TABLE_PREFIX", 'test') separator = os.getenv( "PANKO_TEST_HBASE_TABLE_PREFIX_SEPARATOR", '_') super(MockHBaseTable, self).__init__( "%s%s%s" % (prefix, separator, name), connection) def put(self, row, *args, **kwargs): row = self.data_prefix + row return super(MockHBaseTable, self).put(row, *args, **kwargs) def scan(self, row_start=None, row_stop=None, row_prefix=None, columns=None, filter=None, timestamp=None, include_timestamp=False, batch_size=10, scan_batching=None, limit=None, sorted_columns=False): # Add data prefix for row parameters # row_prefix could not be combined with row_start or row_stop if not row_start and not row_stop: row_prefix = self.data_prefix + (row_prefix or "") row_start = None row_stop = None elif row_start and not row_stop: # Adding data_prefix to row_start and row_stop does not work # if it looks like row_start = %data_prefix%foo, # row_stop = %data_prefix, because row_start > row_stop filter = self._update_filter_row(filter) row_start = self.data_prefix + row_start else: row_start = self.data_prefix + (row_start or "") row_stop = self.data_prefix + (row_stop or "") gen = super(MockHBaseTable, self).scan(row_start, row_stop, row_prefix, columns, filter, timestamp, include_timestamp, batch_size, scan_batching, limit, sorted_columns) data_prefix_len = len(self.data_prefix) # Restore original row format for row, data in gen: yield (row[data_prefix_len:], data) def row(self, row, *args, **kwargs): row = self.data_prefix + row return super(MockHBaseTable, self).row(row, *args, **kwargs) def delete(self, row, *args, **kwargs): row = self.data_prefix + row return super(MockHBaseTable, self).delete(row, *args, **kwargs) def _update_filter_row(self, filter): if filter: return "PrefixFilter(%s) AND %s" % (self.data_prefix, filter) else: return "PrefixFilter(%s)" % self.data_prefix ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4821727 panko-10.0.0/panko/tests/unit/0000775000175000017500000000000000000000000016154 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/unit/__init__.py0000664000175000017500000000000000000000000020253 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4821727 panko-10.0.0/panko/tests/unit/api/0000775000175000017500000000000000000000000016725 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/unit/api/__init__.py0000664000175000017500000000000000000000000021024 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/unit/api/test_app.py0000664000175000017500000000232700000000000021122 0ustar00zuulzuul00000000000000# Copyright 2014 IBM Corp. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
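# ---------------------------------------------------------------------------
# Illustrative use of the MockHBaseTable defined in panko/tests/mocks.py
# above (not part of the test suite).  It assumes a reachable HBase Thrift
# endpoint, an existing 'events' table and a column family 'f'; those names
# are placeholders for the example only.
# ---------------------------------------------------------------------------
import happybase

from panko.tests import mocks


def demo_prefixed_table():
    connection = happybase.Connection(host='localhost')          # assumed endpoint
    table = mocks.MockHBaseTable('events', connection, 'run42_')
    # Rows are transparently written under the 'run42_' prefix ...
    table.put('row-1', {b'f:event_type': b'compute.instance.create.end'})
    # ... and come back with the prefix stripped again on scan().
    return dict(table.scan())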
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from oslo_config import cfg from panko.api import app from panko import service from panko.tests import base class TestApp(base.BaseTestCase): def setUp(self): super(TestApp, self).setUp() self.CONF = service.prepare_service([], []) def test_api_paste_file_not_exist(self): self.CONF.set_override('api_paste_config', 'non-existent-file') with mock.patch.object(self.CONF, 'find_file') as ff: ff.return_value = None self.assertRaises(cfg.ConfigFilesNotFoundError, app.load_app, self.CONF) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/unit/api/test_versions.py0000664000175000017500000000307500000000000022213 0ustar00zuulzuul00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from panko.tests.functional import api V2_MEDIA_TYPES = [ { 'base': 'application/json', 'type': 'application/vnd.openstack.telemetry-v2+json' }, { 'base': 'application/xml', 'type': 'application/vnd.openstack.telemetry-v2+xml' } ] V2_HTML_DESCRIPTION = { 'href': 'https://docs.openstack.org/', 'rel': 'describedby', 'type': 'text/html', } V2_EXPECTED_RESPONSE = { 'id': 'v2', 'links': [ { 'rel': 'self', 'href': 'http://localhost/v2', }, V2_HTML_DESCRIPTION ], 'media-types': V2_MEDIA_TYPES, 'status': 'stable', 'updated': '2013-02-13T00:00:00Z', } V2_VERSION_RESPONSE = { "version": V2_EXPECTED_RESPONSE } VERSIONS_RESPONSE = { "versions": { "values": [ V2_EXPECTED_RESPONSE ] } } class TestVersions(api.FunctionalTest): def test_versions(self): data = self.get_json('/') self.assertEqual(VERSIONS_RESPONSE, data) ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4821727 panko-10.0.0/panko/tests/unit/api/v2/0000775000175000017500000000000000000000000017254 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/unit/api/v2/__init__.py0000664000175000017500000000000000000000000021353 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/unit/api/v2/test_query.py0000664000175000017500000001502200000000000022032 0ustar00zuulzuul00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test the methods related to query.""" import datetime from unittest import mock import fixtures from oslotest import base import wsme from panko.api.controllers.v2 import base as v2_base from panko.api.controllers.v2 import events class TestQuery(base.BaseTestCase): def setUp(self): super(TestQuery, self).setUp() self.useFixture(fixtures.MonkeyPatch( 'pecan.response', mock.MagicMock())) self.useFixture(fixtures.MockPatch('panko.api.controllers.v2.events' '._build_rbac_query_filters', return_value={'t_filter': [], 'admin_proj': None})) def test_get_value_as_type_with_integer(self): query = v2_base.Query(field='metadata.size', op='eq', value='123', type='integer') expected = 123 self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_float(self): query = v2_base.Query(field='metadata.size', op='eq', value='123.456', type='float') expected = 123.456 self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_boolean(self): query = v2_base.Query(field='metadata.is_public', op='eq', value='True', type='boolean') expected = True self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_string(self): query = v2_base.Query(field='metadata.name', op='eq', value='linux', type='string') expected = 'linux' self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_datetime(self): query = v2_base.Query(field='metadata.date', op='eq', value='2014-01-01T05:00:00', type='datetime') self.assertIsInstance(query._get_value_as_type(), datetime.datetime) self.assertIsNone(query._get_value_as_type().tzinfo) def test_get_value_as_type_with_integer_without_type(self): query = v2_base.Query(field='metadata.size', op='eq', value='123') expected = 123 self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_float_without_type(self): query = v2_base.Query(field='metadata.size', op='eq', value='123.456') expected = 123.456 self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_boolean_without_type(self): query = v2_base.Query(field='metadata.is_public', op='eq', value='True') expected = True self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_string_without_type(self): query = v2_base.Query(field='metadata.name', op='eq', value='linux') expected = 'linux' self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_bad_type(self): query = v2_base.Query(field='metadata.size', op='eq', value='123.456', type='blob') self.assertRaises(wsme.exc.ClientSideError, query._get_value_as_type) def test_get_value_as_type_with_bad_value(self): query = v2_base.Query(field='metadata.size', op='eq', value='fake', type='integer') self.assertRaises(wsme.exc.ClientSideError, query._get_value_as_type) def test_get_value_as_type_integer_expression_without_type(self): # bug 1221736 query = v2_base.Query(field='should_be_a_string', op='eq', value='WWW-Layer-4a80714f') expected = 'WWW-Layer-4a80714f' self.assertEqual(expected, query._get_value_as_type()) def 
test_get_value_as_type_boolean_expression_without_type(self): # bug 1221736 query = v2_base.Query(field='should_be_a_string', op='eq', value='True or False') expected = 'True or False' self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_syntax_error(self): # bug 1221736 value = 'WWW-Layer-4a80714f-0232-4580-aa5e-81494d1a4147-uolhh25p5xxm' query = v2_base.Query(field='group_id', op='eq', value=value) expected = value self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_syntax_error_colons(self): # bug 1221736 value = 'Ref::StackId' query = v2_base.Query(field='field_name', op='eq', value=value) expected = value self.assertEqual(expected, query._get_value_as_type()) def test_event_query_to_event_filter_with_bad_op(self): # bug 1511592 query = v2_base.Query(field='event_type', op='ne', value='compute.instance.create.end', type='string') self.assertRaises(v2_base.ClientSideError, events._event_query_to_event_filter, [query]) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/unit/api/v2/test_wsme_custom_type.py0000664000175000017500000000213700000000000024276 0ustar00zuulzuul00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
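# ---------------------------------------------------------------------------
# Short sketch of the value coercion covered by TestQuery above (not part of
# the test module): Query turns the string received over the API into the
# declared, or guessed, Python type.
# ---------------------------------------------------------------------------
from panko.api.controllers.v2 import base as v2_base

size_query = v2_base.Query(field='metadata.size', op='eq',
                           value='123', type='integer')
assert size_query._get_value_as_type() == 123          # explicit type

name_query = v2_base.Query(field='metadata.name', op='eq', value='linux')
assert name_query._get_value_as_type() == 'linux'      # type guessed from value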
from oslotest import base import wsme from panko.api.controllers.v2 import base as v2_base class TestWsmeCustomType(base.BaseTestCase): def test_advenum_default(self): class dummybase(wsme.types.Base): ae = v2_base.AdvEnum("name", str, "one", "other", default="other") obj = dummybase() self.assertEqual("other", obj.ae) obj = dummybase(ae="one") self.assertEqual("one", obj.ae) self.assertRaises(wsme.exc.InvalidInput, dummybase, ae="not exists") ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4821727 panko-10.0.0/panko/tests/unit/event/0000775000175000017500000000000000000000000017275 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/unit/event/__init__.py0000664000175000017500000000000000000000000021374 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4821727 panko-10.0.0/panko/tests/unit/meter/0000775000175000017500000000000000000000000017270 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/unit/meter/__init__.py0000664000175000017500000000000000000000000021367 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4821727 panko-10.0.0/panko/tests/unit/storage/0000775000175000017500000000000000000000000017620 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/unit/storage/__init__.py0000664000175000017500000000000000000000000021717 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4821727 panko-10.0.0/panko/tests/unit/storage/sqlalchemy/0000775000175000017500000000000000000000000021762 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/unit/storage/sqlalchemy/__init__.py0000664000175000017500000000000000000000000024061 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/unit/storage/sqlalchemy/test_models.py0000664000175000017500000000732000000000000024660 0ustar00zuulzuul00000000000000# # Copyright 2013 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime from unittest import mock from oslotest import base import sqlalchemy from sqlalchemy.dialects.mysql import DECIMAL from sqlalchemy.types import NUMERIC from panko.storage.sqlalchemy import models from panko import utils class PreciseTimestampTest(base.BaseTestCase): @staticmethod def fake_dialect(name): def _type_descriptor_mock(desc): if type(desc) == DECIMAL: return NUMERIC(precision=desc.precision, scale=desc.scale) dialect = mock.MagicMock() dialect.name = name dialect.type_descriptor = _type_descriptor_mock return dialect def setUp(self): super(PreciseTimestampTest, self).setUp() self._mysql_dialect = self.fake_dialect('mysql') self._postgres_dialect = self.fake_dialect('postgres') self._type = models.PreciseTimestamp() self._date = datetime.datetime(2012, 7, 2, 10, 44) def test_load_dialect_impl_mysql(self): result = self._type.load_dialect_impl(self._mysql_dialect) self.assertEqual(NUMERIC, type(result)) self.assertEqual(20, result.precision) self.assertEqual(6, result.scale) self.assertTrue(result.asdecimal) def test_load_dialect_impl_postgres(self): result = self._type.load_dialect_impl(self._postgres_dialect) self.assertEqual(sqlalchemy.DateTime, type(result)) def test_process_bind_param_store_decimal_mysql(self): expected = utils.dt_to_decimal(self._date) result = self._type.process_bind_param(self._date, self._mysql_dialect) self.assertEqual(expected, result) def test_process_bind_param_store_datetime_postgres(self): result = self._type.process_bind_param(self._date, self._postgres_dialect) self.assertEqual(self._date, result) def test_process_bind_param_store_none_mysql(self): result = self._type.process_bind_param(None, self._mysql_dialect) self.assertIsNone(result) def test_process_bind_param_store_none_postgres(self): result = self._type.process_bind_param(None, self._postgres_dialect) self.assertIsNone(result) def test_process_result_value_datetime_mysql(self): dec_value = utils.dt_to_decimal(self._date) result = self._type.process_result_value(dec_value, self._mysql_dialect) self.assertEqual(self._date, result) def test_process_result_value_datetime_postgres(self): result = self._type.process_result_value(self._date, self._postgres_dialect) self.assertEqual(self._date, result) def test_process_result_value_none_mysql(self): result = self._type.process_result_value(None, self._mysql_dialect) self.assertIsNone(result) def test_process_result_value_none_postgres(self): result = self._type.process_result_value(None, self._postgres_dialect) self.assertIsNone(result) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/unit/storage/test_get_connection.py0000664000175000017500000000604500000000000024234 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
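# ---------------------------------------------------------------------------
# Minimal sketch of the column-type trick that PreciseTimestampTest above
# asserts: keep microsecond precision on MySQL by storing a DECIMAL unixtime,
# while using the native DATETIME type elsewhere.  This is an independent
# re-implementation for illustration, not Panko's models.PreciseTimestamp.
# ---------------------------------------------------------------------------
import sqlalchemy
from sqlalchemy.dialects.mysql import DECIMAL
from sqlalchemy.types import TypeDecorator

from panko import utils


class MicrosecondTimestamp(TypeDecorator):
    impl = sqlalchemy.DateTime

    def load_dialect_impl(self, dialect):
        if dialect.name == 'mysql':
            # 20 digits / 6 decimal places matches what the tests expect.
            return dialect.type_descriptor(
                DECIMAL(precision=20, scale=6, asdecimal=True))
        return dialect.type_descriptor(sqlalchemy.DateTime())

    def process_bind_param(self, value, dialect):
        if dialect.name == 'mysql':
            return utils.dt_to_decimal(value)      # handles None
        return value

    def process_result_value(self, value, dialect):
        if dialect.name == 'mysql':
            return utils.decimal_to_dt(value)      # handles None
        return value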
"""Tests for panko/storage/ """ from unittest import mock from oslotest import base from panko import service from panko import storage from panko.storage import impl_log from panko.storage import impl_sqlalchemy class EngineTest(base.BaseTestCase): def test_get_connection(self): engine = storage.get_connection('log://localhost', None) self.assertIsInstance(engine, impl_log.Connection) def test_get_connection_no_such_engine(self): try: storage.get_connection('no-such-engine://localhost', None) except RuntimeError as err: self.assertIn('no-such-engine', str(err)) class ConnectionRetryTest(base.BaseTestCase): def setUp(self): super(ConnectionRetryTest, self).setUp() self.CONF = service.prepare_service([], config_files=[]) def test_retries(self): # stevedore gives warning log instead of any exception with mock.patch.object(storage, 'get_connection', side_effect=Exception) as retries: try: self.CONF.set_override("retry_interval", 1, group="database") self.CONF.set_override("max_retries", 3, group="database") storage.get_connection_from_config(self.CONF) except Exception: self.assertEqual(3, retries.call_count) else: self.fail() class ConnectionConfigTest(base.BaseTestCase): def setUp(self): super(ConnectionConfigTest, self).setUp() self.CONF = service.prepare_service([], config_files=[]) def test_only_default_url(self): self.CONF.set_override("connection", "log://", group="database") conn = storage.get_connection_from_config(self.CONF) self.assertIsInstance(conn, impl_log.Connection) def test_two_urls(self): self.CONF.set_override("connection", "log://", group="database") self.CONF.set_override("event_connection", "sqlite://", group="database") conn = storage.get_connection_from_config(self.CONF) self.assertIsInstance(conn, impl_log.Connection) def test_sqlalchemy_driver(self): self.CONF.set_override("connection", "sqlite+pysqlite://", group="database") conn = storage.get_connection_from_config(self.CONF) self.assertIsInstance(conn, impl_sqlalchemy.Connection) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/tests/unit/test_utils.py0000664000175000017500000000636300000000000020735 0ustar00zuulzuul00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for panko/utils.py """ import datetime import decimal from oslotest import base from panko import utils class TestUtils(base.BaseTestCase): def test_datetime_to_decimal(self): expected = 1356093296.12 utc_datetime = datetime.datetime.utcfromtimestamp(expected) actual = utils.dt_to_decimal(utc_datetime) self.assertAlmostEqual(expected, float(actual), places=5) def test_decimal_to_datetime(self): expected = 1356093296.12 dexpected = decimal.Decimal(str(expected)) # Python 2.6 wants str() expected_datetime = datetime.datetime.utcfromtimestamp(expected) actual_datetime = utils.decimal_to_dt(dexpected) # Python 3 have rounding issue on this, so use float self.assertAlmostEqual(utils.dt_to_decimal(expected_datetime), utils.dt_to_decimal(actual_datetime), places=5) def test_recursive_keypairs(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B'}} pairs = list(utils.recursive_keypairs(data)) self.assertEqual([('a', 'A'), ('b', 'B'), ('nested:a', 'A'), ('nested:b', 'B')], pairs) def test_recursive_keypairs_with_separator(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', }, } separator = '.' pairs = list(utils.recursive_keypairs(data, separator)) self.assertEqual([('a', 'A'), ('b', 'B'), ('nested.a', 'A'), ('nested.b', 'B')], pairs) def test_recursive_keypairs_with_list_of_dict(self): small = 1 big = 1 << 64 expected = [('a', 'A'), ('b', 'B'), ('nested:list', [{small: 99, big: 42}])] data = {'a': 'A', 'b': 'B', 'nested': {'list': [{small: 99, big: 42}]}} pairs = list(utils.recursive_keypairs(data)) self.assertEqual(len(expected), len(pairs)) for k, v in pairs: # the keys 1 and 1<<64 cause a hash collision on 64bit platforms if k == 'nested:list': self.assertIn(v, [[{small: 99, big: 42}], [{big: 42, small: 99}]]) else: self.assertIn((k, v), expected) def test_decimal_to_dt_with_none_parameter(self): self.assertIsNone(utils.decimal_to_dt(None)) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/utils.py0000664000175000017500000000735700000000000015561 0ustar00zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Utilities and helper functions.""" import calendar import copy import datetime import decimal from oslo_utils import timeutils from oslo_utils import units def decode_unicode(input): """Decode the unicode of the message, and encode it into utf-8.""" if isinstance(input, dict): temp = {} # If the input data is a dict, create an equivalent dict with a # predictable insertion order to avoid inconsistencies in the # message signature computation for equivalent payloads modulo # ordering for key, value in sorted(input.items()): temp[decode_unicode(key)] = decode_unicode(value) return temp elif isinstance(input, (tuple, list)): # When doing a pair of JSON encode/decode operations to the tuple, # the tuple would become list. So we have to generate the value as # list here. return [decode_unicode(element) for element in input] elif isinstance(input, bytes): return input.decode('utf-8') else: return input def recursive_keypairs(d, separator=':'): """Generator that produces sequence of keypairs for nested dictionaries.""" for name, value in sorted(d.items()): if isinstance(value, dict): for subname, subvalue in recursive_keypairs(value, separator): yield ('%s%s%s' % (name, separator, subname), subvalue) elif isinstance(value, (tuple, list)): yield name, decode_unicode(value) else: yield name, value def dt_to_decimal(utc): """Datetime to Decimal. Some databases don't store microseconds in datetime so we always store as Decimal unixtime. """ if utc is None: return None decimal.getcontext().prec = 30 return (decimal.Decimal(str(calendar.timegm(utc.utctimetuple()))) + (decimal.Decimal(str(utc.microsecond)) / decimal.Decimal("1000000.0"))) def decimal_to_dt(dec): """Return a datetime from Decimal unixtime format.""" if dec is None: return None integer = int(dec) micro = (dec - decimal.Decimal(integer)) * decimal.Decimal(units.M) daittyme = datetime.datetime.utcfromtimestamp(integer) return daittyme.replace(microsecond=int(round(micro))) def sanitize_timestamp(timestamp): """Return a naive utc datetime object.""" if not timestamp: return timestamp if not isinstance(timestamp, datetime.datetime): timestamp = timeutils.parse_isotime(timestamp) return timeutils.normalize_time(timestamp) def update_nested(original_dict, updates): """Updates the leaf nodes in a nest dict. Updates occur without replacing entire sub-dicts. """ dict_to_update = copy.deepcopy(original_dict) for key, value in updates.items(): if isinstance(value, dict): sub_dict = update_nested(dict_to_update.get(key, {}), value) dict_to_update[key] = sub_dict else: dict_to_update[key] = updates[key] return dict_to_update ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/panko/version.py0000664000175000017500000000120400000000000016067 0ustar00zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import pbr.version version_info = pbr.version.VersionInfo('panko') ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4661727 panko-10.0.0/panko.egg-info/0000775000175000017500000000000000000000000015525 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398898.0 panko-10.0.0/panko.egg-info/PKG-INFO0000664000175000017500000000413400000000000016624 0ustar00zuulzuul00000000000000Metadata-Version: 1.2 Name: panko Version: 10.0.0 Summary: Event storage publisher and API for Ceilometer Home-page: https://docs.openstack.org/panko/latest/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: panko ===== The Panko project is an event storage service that provides the ability to store and querying event data generated by Ceilometer with potentially other sources. Panko is a component of the OpenStack Telemetry project. ------------- Documentation ------------- Documentation for the project can be found at: https://docs.openstack.org/panko/latest/ Launchpad Projects ------------------ - Server: https://launchpad.net/panko - Client: https://launchpad.net/python-pankoclient Code Repository --------------- - Server: https://github.com/openstack/panko - Client: https://github.com/openstack/python-pankoclient Bug Tracking ------------ - Bugs: https://storyboard.openstack.org/#!/project/openstack/panko IRC --- IRC Channel: #openstack-telemetry on Freenode. Release notes ------------- Release notes: https://docs.openstack.org/releasenotes/panko/ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Classifier: Programming Language :: Python :: 3.8 Classifier: Topic :: System :: Monitoring Requires-Python: >=3.6 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398898.0 panko-10.0.0/panko.egg-info/SOURCES.txt0000664000175000017500000001320000000000000017405 0ustar00zuulzuul00000000000000.coveragerc .mailmap .stestr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE MAINTAINERS README.rst babel.cfg bindep.txt requirements.txt run-tests.sh setup.cfg setup.py test-requirements.txt tox.ini devstack/README.rst devstack/apache-panko.template devstack/plugin.sh devstack/settings devstack/lib/elasticsearch.sh devstack/upgrade/settings devstack/upgrade/shutdown.sh devstack/upgrade/upgrade.sh doc/Makefile doc/requirements.txt doc/source/conf.py doc/source/index.rst doc/source/api/index.rst doc/source/configuration/sample_policy.rst doc/source/contributor/contributing.rst doc/source/contributor/gmr.rst doc/source/contributor/index.rst doc/source/contributor/testing.rst doc/source/install/development.rst doc/source/install/index.rst doc/source/install/manual.rst doc/source/install/mod_wsgi.rst doc/source/install/uwsgi.rst doc/source/webapi/index.rst doc/source/webapi/v2.rst etc/panko/api_paste.ini etc/panko/panko-config-generator.conf 
etc/panko/panko-policy-generator.conf panko/__init__.py panko/i18n.py panko/opts.py panko/profiler.py panko/service.py panko/utils.py panko/version.py panko.egg-info/PKG-INFO panko.egg-info/SOURCES.txt panko.egg-info/dependency_links.txt panko.egg-info/entry_points.txt panko.egg-info/not-zip-safe panko.egg-info/pbr.json panko.egg-info/requires.txt panko.egg-info/top_level.txt panko/api/__init__.py panko/api/app.py panko/api/app.wsgi panko/api/hooks.py panko/api/middleware.py panko/api/rbac.py panko/api/controllers/__init__.py panko/api/controllers/root.py panko/api/controllers/v2/__init__.py panko/api/controllers/v2/base.py panko/api/controllers/v2/capabilities.py panko/api/controllers/v2/events.py panko/api/controllers/v2/root.py panko/api/controllers/v2/utils.py panko/cmd/__init__.py panko/cmd/storage.py panko/conf/__init__.py panko/conf/defaults.py panko/hacking/__init__.py panko/hacking/checks.py panko/locale/en_GB/LC_MESSAGES/panko.po panko/locale/ko_KR/LC_MESSAGES/panko.po panko/policies/__init__.py panko/policies/base.py panko/policies/segregation.py panko/policies/telemetry.py panko/publisher/__init__.py panko/publisher/database.py panko/storage/__init__.py panko/storage/base.py panko/storage/impl_elasticsearch.py panko/storage/impl_hbase.py panko/storage/impl_log.py panko/storage/impl_mongodb.py panko/storage/impl_sqlalchemy.py panko/storage/models.py panko/storage/pymongo_base.py panko/storage/hbase/__init__.py panko/storage/hbase/base.py panko/storage/hbase/inmemory.py panko/storage/hbase/utils.py panko/storage/mongo/__init__.py panko/storage/mongo/utils.py panko/storage/sqlalchemy/__init__.py panko/storage/sqlalchemy/models.py panko/storage/sqlalchemy/alembic/README panko/storage/sqlalchemy/alembic/alembic.ini panko/storage/sqlalchemy/alembic/env.py panko/storage/sqlalchemy/alembic/script.py.mako panko/storage/sqlalchemy/alembic/versions/c3955547bff2_support_big_integer_traits.py panko/tests/__init__.py panko/tests/base.py panko/tests/db.py panko/tests/mocks.py panko/tests/functional/__init__.py panko/tests/functional/test_bin.py panko/tests/functional/api/__init__.py panko/tests/functional/api/v2/__init__.py panko/tests/functional/api/v2/test_acl_scenarios.py panko/tests/functional/api/v2/test_app.py panko/tests/functional/api/v2/test_capabilities.py panko/tests/functional/api/v2/test_event_scenarios.py panko/tests/functional/gabbi/__init__.py panko/tests/functional/gabbi/fixtures.py panko/tests/functional/gabbi/test_gabbi.py panko/tests/functional/gabbi/test_gabbi_prefix.py panko/tests/functional/gabbi/gabbits/api-events-no-data.yaml panko/tests/functional/gabbi/gabbits/api-events-with-data.yaml panko/tests/functional/gabbi/gabbits/basic.yaml panko/tests/functional/gabbi/gabbits/capabilities.yaml panko/tests/functional/gabbi/gabbits/middleware.yaml panko/tests/functional/gabbi/gabbits_prefix/basic.yaml panko/tests/functional/hooks/post_test_hook.sh panko/tests/functional/publisher/__init__.py panko/tests/functional/storage/__init__.py panko/tests/functional/storage/test_impl_mongodb.py panko/tests/functional/storage/test_impl_sqlalchemy.py panko/tests/functional/storage/test_storage_scenarios.py panko/tests/unit/__init__.py panko/tests/unit/test_utils.py panko/tests/unit/api/__init__.py panko/tests/unit/api/test_app.py panko/tests/unit/api/test_versions.py panko/tests/unit/api/v2/__init__.py panko/tests/unit/api/v2/test_query.py panko/tests/unit/api/v2/test_wsme_custom_type.py panko/tests/unit/event/__init__.py panko/tests/unit/meter/__init__.py 
panko/tests/unit/storage/__init__.py panko/tests/unit/storage/test_get_connection.py panko/tests/unit/storage/sqlalchemy/__init__.py panko/tests/unit/storage/sqlalchemy/test_models.py releasenotes/notes/add-connection-params-to-ES-6b8901686d3ed4fa.yaml releasenotes/notes/ceilometer-panko-publisher-763231cca21cb2ec.yaml releasenotes/notes/deprecate-hbase-79e7e9a77fa3ad2b.yaml releasenotes/notes/drop-py-2-7-21cdb68d76ab2f6b.yaml releasenotes/notes/pecan-debug-removed-b7f4c72c7756bdc1.yaml releasenotes/notes/remove-dispatcher-bf3b609fec150094.yaml releasenotes/notes/support-admin-get-all-events-ffa33e4156647bb9.yaml releasenotes/notes/victoria-support-batch-delete-events-4c63a758bdda93d1.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/ussuri.rst releasenotes/source/victoria.rst releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po tools/__init__.py tools/make_test_event_data.py././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398898.0 panko-10.0.0/panko.egg-info/dependency_links.txt0000664000175000017500000000000100000000000021573 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398898.0 panko-10.0.0/panko.egg-info/entry_points.txt0000664000175000017500000000142200000000000021022 0ustar00zuulzuul00000000000000[ceilometer.event.publisher] panko = panko.publisher.database:DatabasePublisher [console_scripts] panko-dbsync = panko.cmd.storage:dbsync panko-expirer = panko.cmd.storage:expirer [oslo.config.opts] panko = panko.opts:list_opts [oslo.config.opts.defaults] panko = panko.conf.defaults:set_cors_middleware_defaults [oslo.policy.policies] panko = panko.policies:list_policies [panko.storage] es = panko.storage.impl_elasticsearch:Connection hbase = panko.storage.impl_hbase:Connection log = panko.storage.impl_log:Connection mongodb = panko.storage.impl_mongodb:Connection mysql = panko.storage.impl_sqlalchemy:Connection postgresql = panko.storage.impl_sqlalchemy:Connection sqlite = panko.storage.impl_sqlalchemy:Connection [wsgi_scripts] panko-api = panko.api.app:build_wsgi_app ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398898.0 panko-10.0.0/panko.egg-info/not-zip-safe0000664000175000017500000000000100000000000017753 0ustar00zuulzuul00000000000000 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398898.0 panko-10.0.0/panko.egg-info/pbr.json0000664000175000017500000000005700000000000017205 0ustar00zuulzuul00000000000000{"git_version": "8bc176fd", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398898.0 panko-10.0.0/panko.egg-info/requires.txt0000664000175000017500000000077600000000000020137 0ustar00zuulzuul00000000000000Paste PasteDeploy>=1.5.0 PyYAML>=3.1.0 SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 WSME>=0.8 WebOb>=1.2.3 alembic!=0.8.1,!=0.9.0,>=0.7.6 debtcollector>=1.2.0 elasticsearch<3.0.0 keystonemiddleware>=5.1.0 lxml>=2.3 oslo.config>=3.9.0 oslo.context>=2.22.0 oslo.db>=4.1.0 oslo.i18n>=2.1.0 oslo.log>=4.3.0 
oslo.middleware>=3.10.0 oslo.policy>=3.6.0 oslo.reports>=0.6.0 oslo.serialization>=2.25.0 oslo.utils>=3.5.0 pbr>=2.0.0 pecan>=1.0.0 pymongo!=3.1 python-dateutil>=2.4.2 stevedore>=1.9.0 tenacity>=3.1.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398898.0 panko-10.0.0/panko.egg-info/top_level.txt0000664000175000017500000000000600000000000020253 0ustar00zuulzuul00000000000000panko ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4541726 panko-10.0.0/releasenotes/0000775000175000017500000000000000000000000015414 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4861727 panko-10.0.0/releasenotes/notes/0000775000175000017500000000000000000000000016544 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/notes/add-connection-params-to-ES-6b8901686d3ed4fa.yaml0000664000175000017500000000054700000000000026757 0ustar00zuulzuul00000000000000--- features: - | Add new connection parameters to the Elasticsearch event database configuration to specify a custom index name where to save the events called ``es_index_name`` (events will be created under ``my_index_YYYY-MM-DD`` if ``es_index_name`` is ``my_index``) and ``es_ssl_enabled`` to allow SSL connections to Elasticsearch. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/notes/ceilometer-panko-publisher-763231cca21cb2ec.yaml0000664000175000017500000000066300000000000027142 0ustar00zuulzuul00000000000000--- features: - | A new ceilometer to panko publisher is created to avoid ceilometer collector dependency. This streamlines the process of pushing data to Panko upgrade: - | In ceilometer.conf, remove `panko` from event_dispatchers. Add `panko://` to the publishers in event_pipeline.yaml deprecations: - | The ceilometer to panko dispatcher is now deprecated. The publisher should be used going forward. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/notes/deprecate-hbase-79e7e9a77fa3ad2b.yaml0000664000175000017500000000032500000000000025035 0ustar00zuulzuul00000000000000--- deprecations: - | Panko's HBase driver is untested and unmaintained. A quick query confirms no one cares about it. Therefore, the Hbase driver is gone and will be removed in the following cycle. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/notes/drop-py-2-7-21cdb68d76ab2f6b.yaml0000664000175000017500000000030300000000000023674 0ustar00zuulzuul00000000000000--- upgrade: - | Python 2.7 support has been dropped. Last release of Panko to support py2.7 is OpenStack Train. The minimum version of Python now supported by Panko is Python 3.6. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/notes/pecan-debug-removed-b7f4c72c7756bdc1.yaml0000664000175000017500000000007600000000000025546 0ustar00zuulzuul00000000000000--- upgrade: - The api.pecan_debug option has been removed. 
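# ---------------------------------------------------------------------------
# Illustrative sketch only: the Elasticsearch release note above says that
# with es_index_name = "my_index" events end up in daily indices named
# "my_index_YYYY-MM-DD".  A minimal way to derive such a name; the function
# below is hypothetical and not part of Panko's Elasticsearch driver.
# ---------------------------------------------------------------------------
import datetime


def daily_index_name(base_name, when=None):
    """Return the dated index name for a given base name."""
    when = when or datetime.datetime.utcnow()
    return "%s_%s" % (base_name, when.strftime("%Y-%m-%d"))

# daily_index_name("my_index") -> e.g. "my_index_2021-04-14"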
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/notes/remove-dispatcher-bf3b609fec150094.yaml0000664000175000017500000000010000000000000025252 0ustar00zuulzuul00000000000000--- upgrade: - | Remove deprecated Ceilometer dispatcher. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/notes/support-admin-get-all-events-ffa33e4156647bb9.yaml0000664000175000017500000000022400000000000027300 0ustar00zuulzuul00000000000000--- features: - | Specify the 'all_tenants=True' query parameter to get all events for all projects, this is only allowed by admin users. ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/notes/victoria-support-batch-delete-events-4c63a758bdda93d1.yaml0000664000175000017500000000066500000000000031110 0ustar00zuulzuul00000000000000--- features: - | A new ``events_delete_batch_size`` config option is introduced to specify a number of events to be deleted in one iteration from the database. It will help when thare're a lot of events in the database and panko-expire consumes a lot of memory to delete all records with a single call. fixes: | Fixed the issue that panko-expire is consuming too much memory during events cleaning up. ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4861727 panko-10.0.0/releasenotes/source/0000775000175000017500000000000000000000000016714 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/source/conf.py0000664000175000017500000002113100000000000020211 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'reno.sphinxext', 'openstackdocstheme', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. 
source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # openstackdocstheme options openstackdocs_repo_name = 'openstack/panko' openstackdocs_auto_name = False openstackdocs_bug_project = 'panko' openstackdocs_bug_tag = '' project = u'Panko Release Notes' copyright = u'2015, Panko Developers' # Release notes do not need a version number in the title, they # cover multiple releases. # The full version, including alpha/beta/rc tags. release = '' # The short X.Y version. version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'native' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. 
# html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'PankoReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'PankoReleaseNotes.tex', u'Panko Release Notes Documentation', u'Panko Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'Pankoreleasenotes', u'Panko Release Notes Documentation', [u'Panko Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'PankoReleaseNotes', u'Panko Release Notes Documentation', u'Panko Developers', 'PankoReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. 
# texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/source/index.rst0000664000175000017500000000027100000000000020555 0ustar00zuulzuul00000000000000==================== Panko Release Notes ==================== .. toctree:: :maxdepth: 1 unreleased victoria ussuri train stein queens pike ocata newton ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4541726 panko-10.0.0/releasenotes/source/locale/0000775000175000017500000000000000000000000020153 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4541726 panko-10.0.0/releasenotes/source/locale/en_GB/0000775000175000017500000000000000000000000021125 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4861727 panko-10.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000000000000000022712 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po0000664000175000017500000001230400000000000025743 0ustar00zuulzuul00000000000000# Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata # Andi Chandler , 2019. #zanata # Andi Chandler , 2020. #zanata msgid "" msgstr "" "Project-Id-Version: Panko Release Notes\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2020-09-25 06:51+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2020-10-10 02:32+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid "2.0.0" msgstr "2.0.0" msgid "3.0.0" msgstr "3.0.0" msgid "3.1.0" msgstr "3.1.0" msgid "4.0.0" msgstr "4.0.0" msgid "8.0.0" msgstr "8.0.0" msgid "9.0.0.0rc1" msgstr "9.0.0.0rc1" msgid "" "A new ``events_delete_batch_size`` config option is introduced to specify a " "number of events to be deleted in one iteration from the database. It will " "help when thare're a lot of events in the database and panko-expire consumes " "a lot of memory to delete all records with a single call." msgstr "" "A new ``events_delete_batch_size`` config option is introduced to specify a " "number of events to be deleted in one iteration from the database. It will " "help when there are a lot of events in the database and panko-expire " "consumes a lot of memory to delete all records with a single call." msgid "" "A new ceilometer to panko publisher is created to avoid ceilometer collector " "dependency. This streamlines the process of pushing data to Panko" msgstr "" "A new Ceilometer to Panko publisher is created to avoid Ceilometer collector " "dependency. This streamlines the process of pushing data to Panko" msgid "" "Add new connection parameters to the Elasticsearch event database " "configuration to specify a custom index name where to save the events called " "``es_index_name`` (events will be created under ``my_index_YYYY-MM-DD`` if " "``es_index_name`` is ``my_index``) and ``es_ssl_enabled`` to allow SSL " "connections to Elasticsearch." 
msgstr "" "Add new connection parameters to the Elasticsearch event database " "configuration to specify a custom index name where to save the events called " "``es_index_name`` (events will be created under ``my_index_YYYY-MM-DD`` if " "``es_index_name`` is ``my_index``) and ``es_ssl_enabled`` to allow SSL " "connections to Elasticsearch." msgid "Bug Fixes" msgstr "Bug Fixes" msgid "Current Series Release Notes" msgstr "Current Series Release Notes" msgid "Deprecation Notes" msgstr "Deprecation Notes" msgid "" "Fixed the issue that panko-expire is consuming too much memory during events " "cleaning up." msgstr "" "Fixed the issue that panko-expire is consuming too much memory during events " "cleaning up." msgid "" "In ceilometer.conf, remove `panko` from event_dispatchers. Add `panko://` to " "the publishers in event_pipeline.yaml" msgstr "" "In ceilometer.conf, remove `panko` from event_dispatchers. Add `panko://` to " "the publishers in event_pipeline.yaml" msgid "New Features" msgstr "New Features" msgid "Newton Series Release Notes" msgstr "Newton Series Release Notes" msgid "Ocata Series Release Notes" msgstr "Ocata Series Release Notes" msgid "Panko Release Notes" msgstr "Panko Release Notes" msgid "" "Panko's HBase driver is untested and unmaintained. A quick query confirms no " "one cares about it. Therefore, the Hbase driver is gone and will be removed " "in the following cycle." msgstr "" "Panko's HBase driver is untested and unmaintained. A quick query confirms no " "one cares about it. Therefore, the Hbase driver is gone and will be removed " "in the following cycle." msgid "Pike Series Release Notes" msgstr "Pike Series Release Notes" msgid "" "Python 2.7 support has been dropped. Last release of Panko to support py2.7 " "is OpenStack Train. The minimum version of Python now supported by Panko is " "Python 3.6." msgstr "" "Python 2.7 support has been dropped. Last release of Panko to support Python " "2.7 is OpenStack Train. The minimum version of Python now supported by Panko " "is Python 3.6." msgid "Queens Series Release Notes" msgstr "Queens Series Release Notes" msgid "Remove deprecated Ceilometer dispatcher." msgstr "Remove deprecated Ceilometer dispatcher." msgid "" "Specify the 'all_tenants=True' query parameter to get all events for all " "projects, this is only allowed by admin users." msgstr "" "Specify the 'all_tenants=True' query parameter to get all events for all " "projects, this is only allowed by admin users." msgid "Stein Series Release Notes" msgstr "Stein Series Release Notes" msgid "The api.pecan_debug option has been removed." msgstr "The api.pecan_debug option has been removed." msgid "" "The ceilometer to panko dispatcher is now deprecated. The publisher should " "be used going foward." msgstr "" "The Ceilometer to Panko dispatcher is now deprecated. The publisher should " "be used going forward." 
msgid "Train Series Release Notes" msgstr "Train Series Release Notes" msgid "Upgrade Notes" msgstr "Upgrade Notes" msgid "Ussuri Series Release Notes" msgstr "Ussuri Series Release Notes" msgid "Victoria Series Release Notes" msgstr "Victoria Series Release Notes" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4541726 panko-10.0.0/releasenotes/source/locale/fr/0000775000175000017500000000000000000000000020562 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4861727 panko-10.0.0/releasenotes/source/locale/fr/LC_MESSAGES/0000775000175000017500000000000000000000000022347 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po0000664000175000017500000000121600000000000025400 0ustar00zuulzuul00000000000000# Gérald LONLAS , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: Panko Release Notes 2.0.1\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2017-03-10 14:55+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-10-22 06:07+0000\n" "Last-Translator: Gérald LONLAS \n" "Language-Team: French\n" "Language: fr\n" "X-Generator: Zanata 3.9.6\n" "Plural-Forms: nplurals=2; plural=(n > 1)\n" msgid "Newton Series Release Notes" msgstr "Note de release pour Newton" msgid "Panko Release Notes" msgstr "Note de release pour Panko" ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4541726 panko-10.0.0/releasenotes/source/locale/ja/0000775000175000017500000000000000000000000020545 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4901726 panko-10.0.0/releasenotes/source/locale/ja/LC_MESSAGES/0000775000175000017500000000000000000000000022332 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po0000664000175000017500000000421700000000000025367 0ustar00zuulzuul00000000000000# Shu Muto , 2017. #zanata msgid "" msgstr "" "Project-Id-Version: Panko Release Notes 2.0.1\n" "Report-Msgid-Bugs-To: \n" "POT-Creation-Date: 2017-03-09 13:27+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2017-03-06 08:26+0000\n" "Last-Translator: Shu Muto \n" "Language-Team: Japanese\n" "Language: ja\n" "X-Generator: Zanata 3.9.6\n" "Plural-Forms: nplurals=1; plural=0\n" msgid "2.0.0" msgstr "2.0.0" msgid "" "A new ceilometer to panko publisher is created to avoid ceilometer collector " "dependency. This streamlines the process of pushing data to Panko" msgstr "" "Ceilometer のコレクターへの依存を避けるために、 ceilometer から panko への新" "しいパブリッシャーを作成しました。これにより、Panko にデータをプッシュするプ" "ロセスが合理化されます" msgid "Current Series Release Notes" msgstr "開発中バージョンのリリースノート" msgid "Deprecation Notes" msgstr "廃止予定の機能" msgid "" "In ceilometer.conf, remove `panko` from event_dispatchers. 
Add `panko://` to " "the publishers in event_pipeline.yaml" msgstr "" "ceilometer.conf の event_dispatchers から `panko` を削除します。 " "event_pipeline.yaml 内のパブリッシャーに `panko://` を追加してください。" msgid "New Features" msgstr "新機能" msgid "Newton Series Release Notes" msgstr "Newton バージョンのリリースノート" msgid "Ocata Series Release Notes" msgstr "Ocata バージョンのリリースノート" msgid "Panko Release Notes" msgstr "Panko リリースノート" msgid "The api.pecan_debug option has been removed." msgstr "api.pecan_debug オプションを削除しました。" msgid "" "The ceilometer to panko dispatcher is now deprecated. The publisher should " "be used going foward." msgstr "" "ceilometer から panko へのディスパッチャーは非推奨になりました。今後は、パブ" "リッシャーを使用する必要があります。" msgid "Upgrade Notes" msgstr "アップグレード時の注意" ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/source/newton.rst0000664000175000017500000000021400000000000020755 0ustar00zuulzuul00000000000000============================ Newton Series Release Notes ============================ .. release-notes:: :branch: origin/stable/newton ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/source/ocata.rst0000664000175000017500000000021000000000000020526 0ustar00zuulzuul00000000000000=========================== Ocata Series Release Notes =========================== .. release-notes:: :branch: origin/stable/ocata ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/source/pike.rst0000664000175000017500000000021700000000000020376 0ustar00zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/source/queens.rst0000664000175000017500000000022300000000000020743 0ustar00zuulzuul00000000000000=================================== Queens Series Release Notes =================================== .. release-notes:: :branch: stable/queens ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/source/stein.rst0000664000175000017500000000022100000000000020563 0ustar00zuulzuul00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/source/train.rst0000664000175000017500000000017600000000000020567 0ustar00zuulzuul00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/source/unreleased.rst0000664000175000017500000000015600000000000021577 0ustar00zuulzuul00000000000000============================= Current Series Release Notes ============================= .. 
release-notes:: ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/source/ussuri.rst0000664000175000017500000000020200000000000020772 0ustar00zuulzuul00000000000000=========================== Ussuri Series Release Notes =========================== .. release-notes:: :branch: stable/ussuri ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/releasenotes/source/victoria.rst0000664000175000017500000000021200000000000021261 0ustar00zuulzuul00000000000000============================= Victoria Series Release Notes ============================= .. release-notes:: :branch: stable/victoria ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/requirements.txt0000664000175000017500000000213600000000000016211 0ustar00zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. debtcollector>=1.2.0 # Apache-2.0 tenacity>=3.1.0 # Apache-2.0 keystonemiddleware>=5.1.0 # Apache-2.0 lxml>=2.3 # BSD oslo.db>=4.1.0 # Apache-2.0 oslo.config>=3.9.0 # Apache-2.0 oslo.context>=2.22.0 # Apache-2.0 oslo.i18n>=2.1.0 # Apache-2.0 oslo.log>=4.3.0 # Apache-2.0 oslo.policy>=3.6.0 # Apache-2.0 oslo.reports>=0.6.0 # Apache-2.0 Paste PasteDeploy>=1.5.0 # MIT pbr>=2.0.0 # Apache-2.0 pecan>=1.0.0 # BSD oslo.middleware>=3.10.0 # Apache-2.0 oslo.serialization>=2.25.0 # Apache-2.0 oslo.utils>=3.5.0 # Apache-2.0 PyYAML>=3.1.0 # MIT SQLAlchemy!=1.1.5,!=1.1.6,!=1.1.7,!=1.1.8,>=1.0.10 # MIT stevedore>=1.9.0 # Apache-2.0 WebOb>=1.2.3 # MIT WSME>=0.8 # MIT alembic>=0.7.6,!=0.8.1,!=0.9.0 # NOTE(jd) We do not import it directly, but WSME datetime string parsing # behaviour changes when this library is installed python-dateutil>=2.4.2 # BSD pymongo!=3.1 # Apache-2.0 elasticsearch<3.0.0 # Apache-2.0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/run-tests.sh0000775000175000017500000000037400000000000015232 0ustar00zuulzuul00000000000000#!/bin/bash set -e set -o pipefail # Run unit test export OS_TEST_PATH=panko/tests/unit stestr run $* # Run functional test export OS_TEST_PATH=panko/tests/functional/ for backend in $PANKO_BACKENDS; do pifpaf run $backend -- stestr run $* done ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4901726 panko-10.0.0/setup.cfg0000664000175000017500000000365300000000000014553 0ustar00zuulzuul00000000000000[metadata] name = panko summary = Event storage publisher and API for Ceilometer description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = https://docs.openstack.org/panko/latest/ python-requires = >=3.6 classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: 3 :: Only Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 Programming Language :: Python :: 3.8 Topic :: System :: Monitoring [files] packages = panko 
[entry_points] panko.storage = es = panko.storage.impl_elasticsearch:Connection log = panko.storage.impl_log:Connection mongodb = panko.storage.impl_mongodb:Connection mysql = panko.storage.impl_sqlalchemy:Connection postgresql = panko.storage.impl_sqlalchemy:Connection sqlite = panko.storage.impl_sqlalchemy:Connection hbase = panko.storage.impl_hbase:Connection console_scripts = panko-dbsync = panko.cmd.storage:dbsync panko-expirer = panko.cmd.storage:expirer wsgi_scripts = panko-api = panko.api.app:build_wsgi_app ceilometer.event.publisher = panko = panko.publisher.database:DatabasePublisher oslo.config.opts = panko = panko.opts:list_opts oslo.policy.policies = panko = panko.policies:list_policies oslo.config.opts.defaults = panko = panko.conf.defaults:set_cors_middleware_defaults [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = panko/locale/panko.pot [compile_catalog] directory = panko/locale domain = panko [update_catalog] domain = panko output_dir = panko/locale input_file = panko/locale/panko.pot [egg_info] tag_build = tag_date = 0 ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/setup.py0000664000175000017500000000126200000000000014436 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import setuptools setuptools.setup( setup_requires=['pbr'], pbr=True) ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/test-requirements.txt0000664000175000017500000000115400000000000017165 0ustar00zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. 
coverage>=3.6 # Apache-2.0 elasticsearch>=1.3.0 # Apache-2.0 fixtures>=3.0.0 # Apache-2.0/BSD PyMySQL>=0.6.2 # MIT License oslotest>=1.10.0 # Apache-2.0 psycopg2>=2.5 # LGPL/ZPL pymongo!=3.1,>=3.0.2 # Apache-2.0 python-subunit>=0.0.18 # Apache-2.0/BSD stestr>=2.0.0 # Apache-2.0 testtools>=1.4.0 # MIT gabbi>=1.11.0 # Apache-2.0 os-testr>=0.4.1 # Apache-2.0 WebTest>=2.0 # MIT pifpaf>=0.0.11 sqlalchemy-utils ././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1618398898.4901726 panko-10.0.0/tools/0000775000175000017500000000000000000000000014063 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/tools/__init__.py0000664000175000017500000000000000000000000016162 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/tools/make_test_event_data.py0000775000175000017500000000656400000000000020621 0ustar00zuulzuul00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Command line tool for creating event test data for Panko. Usage: Generate testing data for e.g. for default time span . .tox/py27/bin/activate ./tools/make_test_event_data.py --event_types 3 """ import argparse import datetime import random from oslo_utils import timeutils from oslo_utils import uuidutils from panko import service from panko import storage from panko.storage import models def make_test_data(conn, start, end, interval, event_types): # Compute start and end timestamps for the new data. 
if isinstance(start, datetime.datetime): timestamp = start else: timestamp = timeutils.parse_strtime(start) if not isinstance(end, datetime.datetime): end = timeutils.parse_strtime(end) increment = datetime.timedelta(minutes=interval) print('Adding new events') n = 0 while timestamp <= end: data = [] for i in range(event_types): traits = [models.Trait('id1_%d' % i, 1, uuidutils.generate_uuid()), models.Trait('id2_%d' % i, 2, random.randint(1, 10)), models.Trait('id3_%d' % i, 3, random.random()), models.Trait('id4_%d' % i, 4, timestamp)] data.append(models.Event(uuidutils.generate_uuid(), 'event_type%d' % i, timestamp, traits, {})) n += 1 conn.record_events(data) timestamp = timestamp + increment print('Added %d new events' % n) def main(): conf = service.prepare_service() parser = argparse.ArgumentParser( description='generate event data', ) parser.add_argument( '--interval', default=10, type=int, help='The period between events, in minutes.', ) parser.add_argument( '--start', default=31, type=int, help='The number of days in the past to start timestamps.', ) parser.add_argument( '--end', default=2, type=int, help='The number of days into the future to continue timestamps.', ) parser.add_argument( '--event_types', default=3, type=int, help='The number of unique event_types.', ) args = parser.parse_args() # Connect to the event database conn = storage.get_connection_from_config(conf) # Compute the correct time span start = datetime.datetime.utcnow() - datetime.timedelta(days=args.start) end = datetime.datetime.utcnow() + datetime.timedelta(days=args.end) make_test_data(conn=conn, start=start, end=end, interval=args.interval, event_types=args.event_types) if __name__ == '__main__': main() ././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1618398860.0 panko-10.0.0/tox.ini0000664000175000017500000000471100000000000014241 0ustar00zuulzuul00000000000000[tox] minversion = 2.0 skipsdist = True envlist = py{38}{,-mongodb,-mysql,-postgresql,-elasticsearch},pep8 ignore_basepython_conflict = True [testenv] basepython = python3 deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt} {opts} {packages} usedevelop = True setenv = PANKO_BACKENDS=mongodb mysql postgresql mongodb: PANKO_BACKENDS=mongodb mysql: PANKO_BACKENDS=mysql postgresql: PANKO_BACKENDS=postgresql elasticsearch: PANKO_BACKENDS=elasticsearch # NOTE(tonyb): This project has chosen to *NOT* consume upper-constraints.txt passenv = OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE whitelist_externals = bash commands = oslo-config-generator --config-file=etc/panko/panko-config-generator.conf bash run-tests.sh {posargs} [testenv:cover] setenv = {[testenv]setenv} PYTHON=coverage run --source panko --parallel-mode commands = stestr run {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml coverage report [testenv:pep8] deps = hacking<3.1.0,>=3.0.0 doc8 commands = flake8 doc8 {posargs} [testenv:releasenotes] commands = sphinx-build -a -E -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:genconfig] commands = oslo-config-generator --config-file=etc/panko/panko-config-generator.conf [testenv:genpolicy] commands = oslopolicy-sample-generator --config-file=etc/panko/panko-policy-generator.conf [testenv:docs] deps = -r{toxinidir}/doc/requirements.txt commands = 
sphinx-build -W -b html doc/source doc/build/html [testenv:pdf-docs] deps = {[testenv:docs]deps} whitelist_externals = rm make commands = rm -rf doc/build/pdf sphinx-build -W -b latex doc/source doc/build/pdf make -C doc/build/pdf [testenv:venv] commands = {posargs} setenv = PYTHONHASHSEED=0 [doc8] ignore = D000 ignore-path = .venv,.git,.tox,*panko/locale*,*lib/python*,panko.egg*,doc/build,doc/source/api,releasenotes/* [flake8] # W503 line break before binary operator # W504 line break after binary operator ignore = W503,W504 exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build show-source = True [hacking] import_exceptions = panko.i18n [flake8:local-plugins] extension = C301 = checks:no_log_warn C302 = checks:no_os_popen paths = ./panko/hacking
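The [flake8:local-plugins] section above registers two project-specific checks, C301 (checks:no_log_warn) and C302 (checks:no_os_popen), looked up under ./panko/hacking. A minimal sketch of what such flake8 logical-line checks typically look like, assuming the conventional check signature; this is not the actual contents of panko/hacking/checks.py:

def no_log_warn(logical_line):
    # C301: LOG.warn() is deprecated; LOG.warning() should be used instead.
    if logical_line.startswith('LOG.warn('):
        yield (0, 'C301 Use LOG.warning() instead of LOG.warn()')


def no_os_popen(logical_line):
    # C302: os.popen() is obsolete; the subprocess module should be used.
    if 'os.popen(' in logical_line:
        yield (0, 'C302 Deprecated library function os.popen(). '
                  'Replace it using subprocess.')

flake8 calls each registered check once per logical line and reports every (offset, message) pair the check yields.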