aodh-2.0.6/0000775000567000056710000000000013076064720013600 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh.egg-info/0000775000567000056710000000000013076064720016205 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh.egg-info/entry_points.txt0000664000567000056710000000472213076064717021516 0ustar jenkinsjenkins00000000000000[aodh.alarm.rule] combination = aodh.api.controllers.v2.alarm_rules.combination:AlarmCombinationRule composite = aodh.api.controllers.v2.alarm_rules.composite:composite_rule event = aodh.api.controllers.v2.alarm_rules.event:AlarmEventRule gnocchi_aggregation_by_metrics_threshold = aodh.api.controllers.v2.alarm_rules.gnocchi:AggregationMetricsByIdLookupRule gnocchi_aggregation_by_resources_threshold = aodh.api.controllers.v2.alarm_rules.gnocchi:AggregationMetricByResourcesLookupRule gnocchi_resources_threshold = aodh.api.controllers.v2.alarm_rules.gnocchi:MetricOfResourceRule threshold = aodh.api.controllers.v2.alarm_rules.threshold:AlarmThresholdRule [aodh.evaluator] combination = aodh.evaluator.combination:CombinationEvaluator composite = aodh.evaluator.composite:CompositeEvaluator gnocchi_aggregation_by_metrics_threshold = aodh.evaluator.gnocchi:GnocchiAggregationMetricsThresholdEvaluator gnocchi_aggregation_by_resources_threshold = aodh.evaluator.gnocchi:GnocchiAggregationResourcesThresholdEvaluator gnocchi_resources_threshold = aodh.evaluator.gnocchi:GnocchiResourceThresholdEvaluator threshold = aodh.evaluator.threshold:ThresholdEvaluator [aodh.notifier] http = aodh.notifier.rest:RestAlarmNotifier https = aodh.notifier.rest:RestAlarmNotifier log = aodh.notifier.log:LogAlarmNotifier test = aodh.notifier.test:TestAlarmNotifier trust+http = aodh.notifier.trust:TrustRestAlarmNotifier trust+https = aodh.notifier.trust:TrustRestAlarmNotifier zaqar = aodh.notifier.zaqar:ZaqarAlarmNotifier [aodh.storage] hbase = aodh.storage.impl_hbase:Connection log = aodh.storage.impl_log:Connection mongodb = aodh.storage.impl_mongodb:Connection mysql = aodh.storage.impl_sqlalchemy:Connection mysql+pymysql = aodh.storage.impl_sqlalchemy:Connection postgresql = aodh.storage.impl_sqlalchemy:Connection sqlite = aodh.storage.impl_sqlalchemy:Connection [console_scripts] aodh-api = aodh.cmd.api:main aodh-data-migration = aodh.cmd.data_migration:main aodh-dbsync = aodh.cmd.storage:dbsync aodh-evaluator = aodh.cmd.alarm:evaluator aodh-expirer = aodh.cmd.storage:expirer aodh-listener = aodh.cmd.alarm:listener aodh-notifier = aodh.cmd.alarm:notifier [keystoneauth1.plugin] password-aodh-legacy = aodh.keystone_client:LegacyAodhKeystoneLoader [oslo.config.opts] aodh = aodh.opts:list_opts aodh-auth = aodh.opts:list_keystoneauth_opts [oslo.config.opts.defaults] aodh = aodh.conf.defaults:set_cors_middleware_defaults [tempest.test_plugins] aodh_tests = aodh.tests.tempest.plugin:AodhTempestPlugin aodh-2.0.6/aodh.egg-info/top_level.txt0000664000567000056710000000000513076064717020740 0ustar jenkinsjenkins00000000000000aodh aodh-2.0.6/aodh.egg-info/pbr.json0000664000567000056710000000005613076064717017672 0ustar jenkinsjenkins00000000000000{"is_release": true, "git_version": "5dd569d"}aodh-2.0.6/aodh.egg-info/SOURCES.txt0000664000567000056710000001747413076064720020106 0ustar jenkinsjenkins00000000000000.coveragerc .mailmap .testr.conf AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE MAINTAINERS README.rst aodh-config-generator.conf babel.cfg functions.sh pylintrc requirements.txt run-functional-tests.sh setup.cfg setup.py tox.ini aodh/__init__.py aodh/coordination.py aodh/event.py aodh/i18n.py 
aodh/keystone_client.py aodh/messaging.py aodh/opts.py aodh/queue.py aodh/rpc.py aodh/service.py aodh.egg-info/PKG-INFO aodh.egg-info/SOURCES.txt aodh.egg-info/dependency_links.txt aodh.egg-info/entry_points.txt aodh.egg-info/not-zip-safe aodh.egg-info/pbr.json aodh.egg-info/requires.txt aodh.egg-info/top_level.txt aodh/api/__init__.py aodh/api/app.py aodh/api/app.wsgi aodh/api/hooks.py aodh/api/middleware.py aodh/api/rbac.py aodh/api/controllers/__init__.py aodh/api/controllers/root.py aodh/api/controllers/v2/__init__.py aodh/api/controllers/v2/alarms.py aodh/api/controllers/v2/base.py aodh/api/controllers/v2/capabilities.py aodh/api/controllers/v2/query.py aodh/api/controllers/v2/root.py aodh/api/controllers/v2/utils.py aodh/api/controllers/v2/alarm_rules/__init__.py aodh/api/controllers/v2/alarm_rules/combination.py aodh/api/controllers/v2/alarm_rules/composite.py aodh/api/controllers/v2/alarm_rules/event.py aodh/api/controllers/v2/alarm_rules/gnocchi.py aodh/api/controllers/v2/alarm_rules/threshold.py aodh/cmd/__init__.py aodh/cmd/alarm.py aodh/cmd/api.py aodh/cmd/data_migration.py aodh/cmd/storage.py aodh/conf/__init__.py aodh/conf/defaults.py aodh/evaluator/__init__.py aodh/evaluator/combination.py aodh/evaluator/composite.py aodh/evaluator/event.py aodh/evaluator/gnocchi.py aodh/evaluator/threshold.py aodh/evaluator/utils.py aodh/hacking/__init__.py aodh/hacking/checks.py aodh/locale/aodh-log-error.pot aodh/locale/aodh-log-info.pot aodh/locale/aodh-log-warning.pot aodh/locale/aodh.pot aodh/locale/es/LC_MESSAGES/aodh-log-info.po aodh/locale/es/LC_MESSAGES/aodh-log-warning.po aodh/locale/pt/LC_MESSAGES/aodh-log-info.po aodh/locale/pt/LC_MESSAGES/aodh-log-warning.po aodh/locale/pt/LC_MESSAGES/aodh.po aodh/locale/ru/LC_MESSAGES/aodh-log-info.po aodh/locale/ru/LC_MESSAGES/aodh-log-warning.po aodh/locale/ru/LC_MESSAGES/aodh.po aodh/notifier/__init__.py aodh/notifier/log.py aodh/notifier/rest.py aodh/notifier/test.py aodh/notifier/trust.py aodh/notifier/zaqar.py aodh/storage/__init__.py aodh/storage/base.py aodh/storage/impl_hbase.py aodh/storage/impl_log.py aodh/storage/impl_mongodb.py aodh/storage/impl_sqlalchemy.py aodh/storage/models.py aodh/storage/pymongo_base.py aodh/storage/hbase/__init__.py aodh/storage/hbase/base.py aodh/storage/hbase/inmemory.py aodh/storage/hbase/migration.py aodh/storage/hbase/utils.py aodh/storage/mongo/__init__.py aodh/storage/mongo/utils.py aodh/storage/sqlalchemy/__init__.py aodh/storage/sqlalchemy/models.py aodh/storage/sqlalchemy/utils.py aodh/storage/sqlalchemy/alembic/alembic.ini aodh/storage/sqlalchemy/alembic/env.py aodh/storage/sqlalchemy/alembic/script.py.mako aodh/storage/sqlalchemy/alembic/versions/12fe8fac9fe4_initial_base.py aodh/storage/sqlalchemy/alembic/versions/bb07adac380_add_severity_to_alarm_history.py aodh/tests/__init__.py aodh/tests/base.py aodh/tests/constants.py aodh/tests/mocks.py aodh/tests/open-policy.json aodh/tests/policy.json-pre-mikita aodh/tests/test_hacking.py aodh/tests/functional/__init__.py aodh/tests/functional/db.py aodh/tests/functional/api/__init__.py aodh/tests/functional/api/test_app.py aodh/tests/functional/api/test_versions.py aodh/tests/functional/api/v2/__init__.py aodh/tests/functional/api/v2/policy.json-test aodh/tests/functional/api/v2/test_acl_scenarios.py aodh/tests/functional/api/v2/test_alarm_scenarios.py aodh/tests/functional/api/v2/test_app.py aodh/tests/functional/api/v2/test_capabilities.py aodh/tests/functional/api/v2/test_complex_query.py aodh/tests/functional/api/v2/test_complex_query_scenarios.py 
aodh/tests/functional/api/v2/test_query.py aodh/tests/functional/api/v2/test_wsme_custom_type.py aodh/tests/functional/gabbi/__init__.py aodh/tests/functional/gabbi/fixtures.py aodh/tests/functional/gabbi/gabbi_paste.ini aodh/tests/functional/gabbi/test_gabbi.py aodh/tests/functional/gabbi/test_gabbi_live.py aodh/tests/functional/gabbi/gabbits/alarms.yaml aodh/tests/functional/gabbi/gabbits/basic.yaml aodh/tests/functional/gabbi/gabbits/capabilities.yaml aodh/tests/functional/gabbi/gabbits/middleware.yaml aodh/tests/functional/gabbi/gabbits-live/alarms.yaml aodh/tests/functional/hooks/post_test_hook.sh aodh/tests/functional/storage/__init__.py aodh/tests/functional/storage/test_data_migration.py aodh/tests/functional/storage/test_get_connection.py aodh/tests/functional/storage/test_impl_hbase.py aodh/tests/functional/storage/test_impl_log.py aodh/tests/functional/storage/test_impl_mongodb.py aodh/tests/functional/storage/test_impl_sqlalchemy.py aodh/tests/functional/storage/test_storage_scenarios.py aodh/tests/functional/storage/sqlalchemy/__init__.py aodh/tests/functional/storage/sqlalchemy/test_migrations.py aodh/tests/functional/storage/sqlalchemy/test_models.py aodh/tests/tempest/__init__.py aodh/tests/tempest/config.py aodh/tests/tempest/plugin.py aodh/tests/tempest/api/__init__.py aodh/tests/tempest/api/base.py aodh/tests/tempest/api/test_alarming_api.py aodh/tests/tempest/api/test_alarming_api_negative.py aodh/tests/tempest/service/__init__.py aodh/tests/tempest/service/client.py aodh/tests/unit/__init__.py aodh/tests/unit/test_api_v2_capabilities.py aodh/tests/unit/test_bin.py aodh/tests/unit/test_coordination.py aodh/tests/unit/test_evaluator.py aodh/tests/unit/test_event.py aodh/tests/unit/test_messaging.py aodh/tests/unit/test_notifier.py aodh/tests/unit/test_rpc.py aodh/tests/unit/test_storage.py aodh/tests/unit/evaluator/__init__.py aodh/tests/unit/evaluator/base.py aodh/tests/unit/evaluator/test_base.py aodh/tests/unit/evaluator/test_combination.py aodh/tests/unit/evaluator/test_composite.py aodh/tests/unit/evaluator/test_event.py aodh/tests/unit/evaluator/test_gnocchi.py aodh/tests/unit/evaluator/test_threshold.py devstack/README.rst devstack/apache-aodh.template devstack/plugin.sh devstack/settings devstack/gate/gate_hook.sh doc/Makefile doc/source/architecture.rst doc/source/conf.py doc/source/configuration.rst doc/source/contributing.rst doc/source/glossary.rst doc/source/index.rst doc/source/testing.rst doc/source/_templates/.placeholder doc/source/api/index.rst doc/source/install/development.rst doc/source/install/index.rst doc/source/install/manual.rst doc/source/install/mod_wsgi.rst doc/source/install/storage.rst doc/source/webapi/index.rst doc/source/webapi/v2.rst etc/aodh/api_paste.ini etc/aodh/policy.json etc/apache2/aodh rally-jobs/README.rst rally-jobs/ceilometer.yaml rally-jobs/extra/README.rst rally-jobs/extra/fake.img rally-jobs/plugins/README.rst rally-jobs/plugins/plugin_sample.py releasenotes/notes/.placeholder releasenotes/notes/add-a-data-migration-tool-daa14b0cb5d4cc62.yaml releasenotes/notes/bug1540395-reason-string-0aad56966007d0e3.yaml releasenotes/notes/composite-alarm-1b1ca9ea0e8f55c8.yaml releasenotes/notes/deprecate-nosql-backends-13079883eec7e8e5.yaml releasenotes/notes/fix-ceilometerclient-init-8bc7a6742937c3e2.yaml releasenotes/notes/fix-combination-alarms-8097adf08b837a50.yaml releasenotes/notes/fix-empty-statistics-3852da99b1c0b297.yaml releasenotes/notes/fix-gnocchi-aggregation-eval-7c2c1c67bdf2d11c.yaml 
releasenotes/notes/fix-rbac-50825144e0897d7d.yaml releasenotes/notes/gnocchi-client-a62ca5a0c717807e.yaml releasenotes/notes/keystone-v3-support-ffc0f804dbe9d7e9.yaml releasenotes/notes/partition-coordinator-improvement-ff1c257f69f120ac.yaml releasenotes/notes/queue-communication-1b884feab4078dde.yaml releasenotes/notes/remove-eventlet-18ada1cff213af5e.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/liberty.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder tools/pretty_tox.sh tools/test_hbase_table_utils.pyaodh-2.0.6/aodh.egg-info/not-zip-safe0000664000567000056710000000000113076064701020432 0ustar jenkinsjenkins00000000000000 aodh-2.0.6/aodh.egg-info/requires.txt0000664000567000056710000000226313076064717020616 0ustar jenkinsjenkins00000000000000retrying!=1.3.0,>=1.2.3 croniter>=0.3.4 jsonschema!=2.5.0,<3.0.0,>=2.0.0 keystonemiddleware>=2.2.0 gnocchiclient>=2.1.0 lxml>=2.3 oslo.context>=0.2.0 oslo.db>=1.12.0,!=4.13.1,!=4.13.2,!=4.15.0 oslo.config>=2.6.0 oslo.i18n>=1.5.0 oslo.log>=1.2.0 oslo.policy>=0.5.0 oslo.service>=0.1.0 PasteDeploy>=1.5.0 pbr<2.0,>=0.11 pecan>=0.8.0 oslo.messaging>2.6.1,!=2.8.0 oslo.middleware>=3.0.0 oslo.serialization>=1.4.0 oslo.utils>=1.9.0 python-ceilometerclient>=1.5.0 python-keystoneclient>=1.6.0 pytz>=2013.6 requests>=2.5.2 six>=1.9.0 stevedore>=1.5.0 tooz>=1.28.0 Werkzeug>=0.7 WebOb>=1.2.3 WSME>=0.8 [doc] oslosphinx>=2.5.0 # Apache-2.0 reno>=0.1.1 # Apache2 sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 sphinxcontrib-httpdomain sphinxcontrib-pecanwsme>=0.8 [hbase] happybase!=0.7,>=0.5,<1.0.0 pymongo>=3.0.2 [mongodb] pymongo>=3.0.2 [mysql] SQLAlchemy<1.1.0,>=0.9.7 alembic>=0.7.2 PyMySQL>=0.6.2 # MIT License [postgresql] SQLAlchemy<1.1.0,>=0.9.7 alembic>=0.7.2 psycopg2 [test] overtest>=0.7.0 oslotest>=1.5.1 # Apache-2.0 coverage>=3.6 fixtures>=1.3.1 mock>=1.0 testrepository>=0.0.18 testresources>=0.2.4 # Apache-2.0/BSD testtools>=1.4.0 gabbi>=0.12.0 # Apache-2.0 tempest-lib>=0.6.1 python-subunit>=0.0.18 aodh-2.0.6/aodh.egg-info/PKG-INFO0000664000567000056710000000201413076064717017305 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: aodh Version: 2.0.6 Summary: OpenStack Telemetry Alarming Home-page: http://www.openstack.org/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: aodh ==== Release notes can be read online at: http://docs.openstack.org/developer/aodh/releasenotes/index.html Documentation for the project can be found at: http://docs.openstack.org/developer/aodh/ The project home is at: http://launchpad.net/aodh Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Topic :: System :: Monitoring aodh-2.0.6/aodh.egg-info/dependency_links.txt0000664000567000056710000000000113076064717022261 0ustar jenkinsjenkins00000000000000 aodh-2.0.6/releasenotes/0000775000567000056710000000000013076064720016271 5ustar jenkinsjenkins00000000000000aodh-2.0.6/releasenotes/source/0000775000567000056710000000000013076064720017571 5ustar jenkinsjenkins00000000000000aodh-2.0.6/releasenotes/source/conf.py0000664000567000056710000002167713076064371021107 
0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # Aodh Release Notes documentation build configuration file, created by # sphinx-quickstart on Tue Nov 3 17:40:50 2015. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'oslosphinx', 'reno.sphinxext', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Aodh Release Notes' copyright = u'2015, Aodh Developers' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. import pbr.version aodh_version = pbr.version.VersionInfo('aodh') # The full version, including alpha/beta/rc tags. release = aodh_version.version_string_with_vcs() # The short X.Y version. version = aodh_version.canonical_version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'AodhReleaseNotesdoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. 
List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'AodhReleaseNotes.tex', u'Aodh Release Notes Documentation', u'Aodh Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'aodhreleasenotes', u'Aodh Release Notes Documentation', [u'Aodh Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'AodhReleaseNotes', u'Aodh Release Notes Documentation', u'Aodh Developers', 'AodhReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] aodh-2.0.6/releasenotes/source/_templates/0000775000567000056710000000000013076064720021726 5ustar jenkinsjenkins00000000000000aodh-2.0.6/releasenotes/source/_templates/.placeholder0000664000567000056710000000000013076064371024201 0ustar jenkinsjenkins00000000000000aodh-2.0.6/releasenotes/source/_static/0000775000567000056710000000000013076064720021217 5ustar jenkinsjenkins00000000000000aodh-2.0.6/releasenotes/source/_static/.placeholder0000664000567000056710000000000013076064371023472 0ustar jenkinsjenkins00000000000000aodh-2.0.6/releasenotes/source/liberty.rst0000664000567000056710000000022213076064371021773 0ustar jenkinsjenkins00000000000000============================== Liberty Series Release Notes ============================== .. release-notes:: :branch: origin/stable/liberty aodh-2.0.6/releasenotes/source/unreleased.rst0000664000567000056710000000016113076064371022452 0ustar jenkinsjenkins00000000000000============================== Current Series Release Notes ============================== .. release-notes:: aodh-2.0.6/releasenotes/source/index.rst0000664000567000056710000000016413076064372021436 0ustar jenkinsjenkins00000000000000=================== Aodh Release Notes =================== .. 
toctree:: :maxdepth: 1 liberty unreleased aodh-2.0.6/releasenotes/notes/0000775000567000056710000000000013076064720017421 5ustar jenkinsjenkins00000000000000aodh-2.0.6/releasenotes/notes/.placeholder0000664000567000056710000000000013076064371021674 0ustar jenkinsjenkins00000000000000aodh-2.0.6/releasenotes/notes/fix-combination-alarms-8097adf08b837a50.yaml0000664000567000056710000000030013076064371027005 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1511252 `_] Fix an issue with combination alarms where it fails to evaluate all issues in the chain of alarms. aodh-2.0.6/releasenotes/notes/gnocchi-client-a62ca5a0c717807e.yaml0000664000567000056710000000026713076064371025410 0ustar jenkinsjenkins00000000000000--- features: - > Gnocchi dispatcher now uses client rather than direct http requests upgrade: - > gnocchiclient library is now a requirement if using ceilometer+gnocchi. aodh-2.0.6/releasenotes/notes/fix-gnocchi-aggregation-eval-7c2c1c67bdf2d11c.yaml0000664000567000056710000000025313076064371030263 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1513738 `_] Fix an issue where alarms using Gnocchi aggregations are not being evaluated. aodh-2.0.6/releasenotes/notes/bug1540395-reason-string-0aad56966007d0e3.yaml0000664000567000056710000000027713076064371026564 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1540395 `_] Fix reason string to properly handle transitions when one sample is outside of defined threshold. aodh-2.0.6/releasenotes/notes/fix-ceilometerclient-init-8bc7a6742937c3e2.yaml0000664000567000056710000000023413076064371027527 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1518447 `_] Fix to ensure ceilometerclient is properly initialised on startup. aodh-2.0.6/releasenotes/notes/partition-coordinator-improvement-ff1c257f69f120ac.yaml0000664000567000056710000000035013076064371031477 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1575530 `_] Patch was added to fix and improve the partition coordinator, making sure that input tasks are correctly distributed to partition members. aodh-2.0.6/releasenotes/notes/deprecate-nosql-backends-13079883eec7e8e5.yaml0000664000567000056710000000044713076064371027331 0ustar jenkinsjenkins00000000000000--- deprecations: - > Drop support for NoSQL backends in Aodh. SQL is a perfectly sufficient backend for handling the scope of alarms. To maximise available resources, NoSQL backends are deprecated so developers do not need to worry about adding features to multiple backends. aodh-2.0.6/releasenotes/notes/fix-empty-statistics-3852da99b1c0b297.yaml0000664000567000056710000000025213076064371026560 0ustar jenkinsjenkins00000000000000--- fixes: - > [`bug 1539069 `_] Fix to handle scenario where no valid statistics exist for the specified period. aodh-2.0.6/releasenotes/notes/fix-rbac-50825144e0897d7d.yaml0000664000567000056710000000101213076064371024002 0ustar jenkinsjenkins00000000000000--- upgrade: - > A new default policy.json is provided to properly handle RBAC control. Existing policy.json files may not grant the appropriate access. security: - > Patch was added to address inconsistent RBAC policy handling. Certain rules may not have been given appropriate access. fixes: - > [`bug 1504495 `_] Patch was added to address inconsistent RBAC policy handling. Certain rules may not have been given appropriate access. 
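As an illustration of the RBAC change noted above (a hedged sketch, not part of the released notes): the default policy.json shipped in etc/aodh/policy.json, reproduced later in this archive, defines an admin_or_owner rule but leaves "telemetry:create_alarm" unrestricted. A deployment that wants only admins or project members to create alarms could override just that rule, for example::

    {
        "admin_or_owner": "rule:context_is_admin or project_id:%(project_id)s",
        "telemetry:create_alarm": "rule:admin_or_owner"
    }

This override is an assumption about one possible deployment policy; the shipped default allows any authenticated user to create alarms.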
aodh-2.0.6/releasenotes/notes/composite-alarm-1b1ca9ea0e8f55c8.yaml0000664000567000056710000000045113076064371025757 0ustar jenkinsjenkins00000000000000--- features: - > Add a new composite type alarm, which allow users specifying a composite rule to define an alarm with multiple triggering conditions, using a combination of *and*, *or* relationships. The composite rule is composed of multiple threshold rules or gnocchi rules. aodh-2.0.6/releasenotes/notes/remove-eventlet-18ada1cff213af5e.yaml0000664000567000056710000000012113076064371026045 0ustar jenkinsjenkins00000000000000--- features: - > Remove eventlet from Aodh in favour of threaded approach aodh-2.0.6/releasenotes/notes/keystone-v3-support-ffc0f804dbe9d7e9.yaml0000664000567000056710000000010313076064371026655 0ustar jenkinsjenkins00000000000000--- features: - > Add support for Keystone v3 authentication aodh-2.0.6/releasenotes/notes/add-a-data-migration-tool-daa14b0cb5d4cc62.yaml0000664000567000056710000000045413076064371027543 0ustar jenkinsjenkins00000000000000--- upgrade: - > Add a tool for migrating alarm and alarm history data from NoSQL storage to SQL storage. The migration tool has been tested OK in devstack environment, but users need to be cautious with this, because the data migration between storage backends is a bit dangerous. aodh-2.0.6/releasenotes/notes/queue-communication-1b884feab4078dde.yaml0000664000567000056710000000121513076064371026657 0ustar jenkinsjenkins00000000000000--- features: - > Support for queue based communication between alarm evaluator service and alarm notifier services was added. Original implementation involved using RPC but there is significant overhead involved with using RPC. Work queues provided required functionality with better performance. upgrade: - > Queue based communication is the new default IPC protocol. RPC can still be used by choosing rpc as ipc_protocol option. Only one protocol can be run at any given time. deprecations: - > Because queues provide the equivalent functionality. RPC support is deprecated and will be removed after Mitaka. aodh-2.0.6/requirements.txt0000664000567000056710000000200513076064372017064 0ustar jenkinsjenkins00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. retrying!=1.3.0,>=1.2.3 # Apache-2.0 croniter>=0.3.4 # MIT License jsonschema!=2.5.0,<3.0.0,>=2.0.0 keystonemiddleware>=2.2.0 gnocchiclient>=2.1.0 # Apache-2.0 lxml>=2.3 oslo.context>=0.2.0 # Apache-2.0 oslo.db>=1.12.0,!=4.13.1,!=4.13.2,!=4.15.0 # Apache-2.0 oslo.config>=2.6.0 # Apache-2.0 oslo.i18n>=1.5.0 # Apache-2.0 oslo.log>=1.2.0 # Apache-2.0 oslo.policy>=0.5.0 # Apache-2.0 oslo.service>=0.1.0 # Apache-2.0 PasteDeploy>=1.5.0 pbr<2.0,>=0.11 pecan>=0.8.0 oslo.messaging>2.6.1,!=2.8.0 # Apache-2.0 oslo.middleware>=3.0.0 # Apache-2.0 oslo.serialization>=1.4.0 # Apache-2.0 oslo.utils>=1.9.0 # Apache-2.0 python-ceilometerclient>=1.5.0 python-keystoneclient>=1.6.0 pytz>=2013.6 requests>=2.5.2 six>=1.9.0 stevedore>=1.5.0 # Apache-2.0 tooz>=1.28.0 # Apache-2.0 Werkzeug>=0.7 # BSD License WebOb>=1.2.3 WSME>=0.8 aodh-2.0.6/pylintrc0000664000567000056710000000304213076064372015371 0ustar jenkinsjenkins00000000000000# The format of this file isn't really documented; just use --generate-rcfile [MASTER] # Add to the black list. It should be a base name, not a # path. 
ignore=openstack [Messages Control] # NOTE(justinsb): We might want to have a 2nd strict pylintrc in future # C0111: Don't require docstrings on every method # W0511: TODOs in code comments are fine. # W0142: *args and **kwargs are fine. # W0622: Redefining id is fine. # W0703: Catch "Exception". disable=C0111,W0511,W0142,W0622,W0703 [Basic] # Variable names can be 1 to 31 characters long, with lowercase and underscores variable-rgx=[a-z_][a-z0-9_]{0,30}$ # Argument names can be 2 to 31 characters long, with lowercase and underscores argument-rgx=[a-z_][a-z0-9_]{1,30}$ # Type attributes names can be 2 to 31 characters long, with lowercase and underscores attr-rgx=[a-z_][a-z0-9_]{1,30}$ # Method names should be at least 3 characters long and be lowercased with underscores method-rgx=([a-z_][a-z0-9_]{1,30}|setUp|tearDown)$ # Module names matching sahara-* are ok (files in bin/) module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(sahara-[a-z0-9_-]+))$ # Don't require docstrings on tests. no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$ [Design] max-public-methods=100 min-public-methods=0 max-args=6 [Variables] # List of additional names supposed to be defined in builtins. Remember that # you should avoid to define new builtins when possible. # _ is used by our localization additional-builtins=_ [TYPECHECK] generated-members=query,node_template,status_code,data aodh-2.0.6/etc/0000775000567000056710000000000013076064720014353 5ustar jenkinsjenkins00000000000000aodh-2.0.6/etc/aodh/0000775000567000056710000000000013076064720015266 5ustar jenkinsjenkins00000000000000aodh-2.0.6/etc/aodh/policy.json0000664000567000056710000000132513076064372017464 0ustar jenkinsjenkins00000000000000{ "context_is_admin": "role:admin", "segregation": "rule:context_is_admin", "admin_or_owner": "rule:context_is_admin or project_id:%(project_id)s", "default": "rule:admin_or_owner", "telemetry:get_alarm": "rule:admin_or_owner", "telemetry:get_alarms": "rule:admin_or_owner", "telemetry:query_alarm": "rule:admin_or_owner", "telemetry:create_alarm": "", "telemetry:change_alarm": "rule:admin_or_owner", "telemetry:delete_alarm": "rule:admin_or_owner", "telemetry:get_alarm_state": "rule:admin_or_owner", "telemetry:change_alarm_state": "rule:admin_or_owner", "telemetry:alarm_history": "rule:admin_or_owner", "telemetry:query_alarm_history": "rule:admin_or_owner" } aodh-2.0.6/etc/aodh/api_paste.ini0000664000567000056710000000130613076064372017737 0ustar jenkinsjenkins00000000000000# aodh API WSGI Pipeline # Define the filters that make up the pipeline for processing WSGI requests # Note: This pipeline is PasteDeploy's term rather than aodh's pipeline # used for processing samples # Remove authtoken from the pipeline if you don't want to use keystone authentication [pipeline:main] pipeline = cors request_id authtoken api-server [app:api-server] paste.app_factory = aodh.api.app:app_factory [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory oslo_config_project = aodh [filter:request_id] paste.filter_factory = oslo_middleware:RequestId.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = aodhaodh-2.0.6/etc/apache2/0000775000567000056710000000000013076064720015656 5ustar jenkinsjenkins00000000000000aodh-2.0.6/etc/apache2/aodh0000664000567000056710000000257613076064372016531 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not 
use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This is an example Apache2 configuration file for using the # aodh API through mod_wsgi. # Note: If you are using a Debian-based system then the paths # "/var/log/httpd" and "/var/run/httpd" will use "apache2" instead # of "httpd". # # The number of processes and threads is an example only and should # be adjusted according to local requirements. Listen 8042 WSGIDaemonProcess aodh-api processes=2 threads=10 user=SOMEUSER display-name=%{GROUP} WSGIProcessGroup aodh-api WSGIScriptAlias / /var/www/aodh/app WSGIApplicationGroup %{GLOBAL} = 2.4> ErrorLogFormat "%{cu}t %M" ErrorLog /var/log/httpd/aodh_error.log CustomLog /var/log/httpd/aodh_access.log combined WSGISocketPrefix /var/run/httpd aodh-2.0.6/HACKING.rst0000664000567000056710000000044213076064372015401 0ustar jenkinsjenkins00000000000000Aodh Style Commandments ======================= - Step 1: Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ - Step 2: Read on Aodh Specific Commandments -------------------------- - [C300] Check for oslo library imports use the non-namespaced packages aodh-2.0.6/.coveragerc0000664000567000056710000000012613076064371015722 0ustar jenkinsjenkins00000000000000[run] branch = True source = aodh omit = aodh/tests/* [report] ignore_errors = True aodh-2.0.6/tox.ini0000664000567000056710000000660513076064372015125 0ustar jenkinsjenkins00000000000000[tox] minversion = 1.6 skipsdist = True envlist = py34,py27,pep8 [testenv] deps = .[test] install_command = pip install -U {opts} {packages} usedevelop = True setenv = VIRTUAL_ENV={envdir} OS_TEST_PATH=aodh/tests/unit passenv = OS_TEST_TIMEOUT OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_LOG_CAPTURE commands = python setup.py testr --slowest --testr-args="{posargs}" oslo-config-generator --config-file=aodh-config-generator.conf whitelist_externals = bash # TODO(ityaptin): With separation tests to unit and functional folders we need # set environment variable OS_TEST_PATH=./aodh/tests/functional # in "py-" jobs [testenv:py27-hbase] deps = .[hbase,test] setenv = OS_TEST_PATH=aodh/tests/functional/ AODH_TEST_STORAGE_URL=hbase://__test__ [testenv:py27-mongodb] deps = .[mongodb,test] setenv = OS_TEST_PATH=aodh/tests/functional/ commands = overtest mongodb python setup.py testr --slowest --testr-args="{posargs}" [testenv:py27-mysql] deps = .[mysql,test] setenv = OS_TEST_PATH=aodh/tests/functional/ commands = overtest mysql python setup.py testr --slowest --testr-args="{posargs}" [testenv:py27-pgsql] deps = .[postgresql,test] setenv = OS_TEST_PATH=aodh/tests/functional/ commands = overtest postgresql python setup.py testr --slowest --testr-args="{posargs}" [testenv:functional] deps = .[mysql,postgresql,mongodb,test] setenv = VIRTUAL_ENV={envdir} OS_TEST_PATH=aodh/tests/functional/ GABBI_LIVE_FAIL_IF_NO_TEST=1 passenv = {[testenv]passenv} AODH_* commands = {toxinidir}/run-functional-tests.sh ./tools/pretty_tox.sh "{posargs}" # NOTE(chdent): The gabbi tests are also run under the primary tox # targets. 
This target simply provides a target to directly run just # gabbi tests without needing to discovery across the entire body of # tests. [testenv:gabbi] deps = .[mysql,test] setenv = OS_TEST_PATH=aodh/tests/functional/gabbi commands = overtest mysql python setup.py testr --testr-args="{posargs}" [testenv:cover] commands = overtest mysql python setup.py testr --slowest --coverage --testr-args="{posargs}" [testenv:pep8] deps = hacking<0.11,>=0.10.0 commands = flake8 # Check that .po and .pot files are valid: bash -c "find aodh -type f -regex '.*\.pot?' -print0|xargs -0 -n 1 msgfmt --check-format -o /dev/null" [testenv:releasenotes] deps = .[doc] commands = sphinx-build -a -E -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html [testenv:genconfig] commands = oslo-config-generator --config-file=aodh-config-generator.conf [testenv:docs] deps = .[doc] commands = python setup.py build_sphinx setenv = PYTHONHASHSEED=0 [testenv:venv] deps = .[doc] commands = {posargs} setenv = PYTHONHASHSEED=0 [testenv:debug] commands = bash -x oslo_debug_helper {posargs} [testenv:debug-mongodb] deps = .[mongodb,test] setenv = OS_TEST_PATH=aodh/tests/functional/ commands = overtest mongodb oslo_debug_helper {posargs} [testenv:debug-mysql] deps = .[mysql,test] setenv = OS_TEST_PATH=aodh/tests/functional/ commands = overtest mysql oslo_debug_helper {posargs} [testenv:debug-pgsql] deps = .[postgresql,test] setenv = OS_TEST_PATH=aodh/tests/functional/ commands = overtest postgresql oslo_debug_helper {posargs} [flake8] ignore = exclude=.venv,.git,.tox,dist,doc,./aodh/openstack/common,*lib/python*,*egg,build show-source = True [hacking] import_exceptions = aodh.i18n local-check-factory = aodh.hacking.checks.factory aodh-2.0.6/doc/0000775000567000056710000000000013076064720014345 5ustar jenkinsjenkins00000000000000aodh-2.0.6/doc/Makefile0000664000567000056710000001361313076064371016013 0ustar jenkinsjenkins00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = build # Internal variables. 
PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " wadl to build a WADL file for api.openstack.org" clean: -rm -rf $(BUILDDIR)/* html: check-dependencies $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." .PHONY: check-dependencies check-dependencies: @python -c 'import sphinxcontrib.autohttp.flask' >/dev/null 2>&1 || (echo "ERROR: Missing Sphinx dependencies. Run: pip install sphinxcontrib-httpdomain" && exit 1) wadl: $(SPHINXBUILD) -b docbook $(ALLSPHINXOPTS) $(BUILDDIR)/wadl @echo @echo "Build finished. The WADL pages are in $(BUILDDIR)/wadl." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Ceilometer.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Ceilometer.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." 
@echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/Ceilometer" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Ceilometer" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." aodh-2.0.6/doc/source/0000775000567000056710000000000013076064720015645 5ustar jenkinsjenkins00000000000000aodh-2.0.6/doc/source/webapi/0000775000567000056710000000000013076064720017114 5ustar jenkinsjenkins00000000000000aodh-2.0.6/doc/source/webapi/v2.rst0000664000567000056710000001014213076064372020176 0ustar jenkinsjenkins00000000000000.. docbookrestapi ============ V2 Web API ============ Capabilities ============ The Capabilities API allows you to directly discover which functions from the V2 API functionality, including the selectable aggregate functions, are supported by the currently configured storage driver. A capabilities query returns a flattened dictionary of properties with associated boolean values - a 'False' or absent value means that the corresponding feature is not available in the backend. .. rest-controller:: aodh.api.controllers.v2.capabilities:CapabilitiesController :webprefix: /v2/capabilities .. autotype:: aodh.api.controllers.v2.capabilities.Capabilities :members: .. _alarms-api: Alarms ====== .. rest-controller:: aodh.api.controllers.v2.alarms:AlarmsController :webprefix: /v2/alarms .. rest-controller:: aodh.api.controllers.v2.alarms:AlarmController :webprefix: /v2/alarms .. autotype:: aodh.api.controllers.v2.alarms.Alarm :members: .. 
autotype:: aodh.api.controllers.v2.alarm_rules.threshold.AlarmThresholdRule :members: .. autotype:: aodh.api.controllers.v2.alarm_rules.combination.AlarmCombinationRule :members: .. autotype:: aodh.api.controllers.v2.alarm_rules.gnocchi.MetricOfResourceRule :members: .. autotype:: aodh.api.controllers.v2.alarm_rules.gnocchi.AggregationMetricByResourcesLookupRule :members: .. autotype:: aodh.api.controllers.v2.alarm_rules.gnocchi.AggregationMetricsByIdLookupRule :members: .. autotype:: aodh.api.controllers.v2.alarms.AlarmTimeConstraint :members: .. autotype:: aodh.api.controllers.v2.alarms.AlarmChange :members: Filtering Queries ================= The filter expressions of the query feature operate on the fields of *Alarm* and *AlarmChange*. The following comparison operators are supported: *=*, *!=*, *<*, *<=*, *>*, *>=* and *in*; and the following logical operators can be used: *and*, *or* and *not*. The field names are validated against the database models. .. note:: The *not* operator has a different meaning in MongoDB and in a SQL DB engine. If the *not* operator is applied to a non-existent metadata field, then the result depends on the DB engine. For example, if the {"not": {"metadata.nonexistent_field" : "some value"}} filter is used in a query, MongoDB will return every Sample object, as the *not* operator evaluates to true for every Sample where the given field does not exist. See more in the MongoDB documentation. On the other hand, a SQL-based DB engine will return an empty result, as the join operation on the metadata table returns zero rows: the ON clause of the join, which tries to match on the metadata field name, is never fulfilled. Complex Query supports defining the list of orderby expressions in the form of [{"field_name": "asc"}, {"field_name2": "desc"}, ...]. The number of returned items can be bounded using the *limit* option. The *filter*, *orderby* and *limit* are all optional fields in a query. .. rest-controller:: aodh.api.controllers.v2.query:QueryAlarmsController :webprefix: /v2/query/alarms .. rest-controller:: aodh.api.controllers.v2.query:QueryAlarmHistoryController :webprefix: /v2/query/alarms/history .. autotype:: aodh.api.controllers.v2.query.ComplexQuery :members: Composite rule Alarm ==================== The *composite* type alarm allows users to specify a composite rule to define an alarm with multiple triggering conditions, using a combination of *and* and *or* relations. A composite rule is composed of multiple threshold rules or gnocchi rules. A sample composite alarm request form is as follows:: { "name": "test_composite", "type": "composite", "composite_rule": { "and": [THRESHOLD_RULE1, THRESHOLD_RULE2, { 'or': [THRESHOLD_RULE3, GNOCCHI_RULE1, GNOCCHI_RULE2, GNOCCHI_RULE3] }] } } A sub-rule in composite_rule is the same as a threshold_rule in a threshold alarm or a gnocchi_rule in a gnocchi alarm. Additionally, it has a mandatory *type* field to specify the rule type, like in the following sample:: { "threshold": 0.8, "meter_name": "cpu_util", "type": "threshold" } aodh-2.0.6/doc/source/webapi/index.rst0000664000567000056710000000241713076064372020764 0ustar jenkinsjenkins00000000000000========= Web API ========= .. toctree:: :maxdepth: 2 v2 You can get the API version list via a request to the endpoint root path. 
For example:: curl -H "X-AUTH-TOKEN: fa2ec18631f94039a5b9a8b4fe8f56ad" http://127.0.0.1:8777 Sample response:: { "versions": { "values": [ { "id": "v2", "links": [ { "href": "http://127.0.0.1:8777/v2", "rel": "self" }, { "href": "http://docs.openstack.org/", "rel": "describedby", "type": "text/html" } ], "media-types": [ { "base": "application/json", "type": "application/vnd.openstack.telemetry-v2+json" }, { "base": "application/xml", "type": "application/vnd.openstack.telemetry-v2+xml" } ], "status": "stable", "updated": "2013-02-13T00:00:00Z" } ] } } aodh-2.0.6/doc/source/conf.py0000664000567000056710000002300413076064372017146 0ustar jenkinsjenkins00000000000000# # Aodh documentation build configuration file, created by # sphinx-quickstart on Thu Oct 27 11:38:59 2011. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os import subprocess BASE_DIR = os.path.dirname(os.path.abspath(__file__)) ROOT = os.path.abspath(os.path.join(BASE_DIR, "..", "..")) sys.path.insert(0, ROOT) sys.path.insert(0, BASE_DIR) # This is required for ReadTheDocs.org, but isn't a bad idea anyway. os.environ['DJANGO_SETTINGS_MODULE'] = 'openstack_dashboard.settings' # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. # They can be extensions coming with Sphinx (named 'sphinx.ext.*') # or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinxcontrib.autohttp.flask', 'wsmeext.sphinxext', 'sphinx.ext.coverage', 'sphinx.ext.viewcode', 'sphinxcontrib.pecanwsme.rest', 'oslosphinx', 'stevedore.sphinxext', ] wsme_protocols = ['restjson', 'restxml'] todo_include_todos = True # Add any paths that contain templates here, relative to this directory. if os.getenv('HUDSON_PUBLISH_DOCS'): templates_path = ['_ga', '_templates'] else: templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Aodh' copyright = u'2012-2015, OpenStack Foundation' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['**/#*', '**~', '**/#*#', '**/*alembic*'] # The reST default role (used for this markup: `text`) # to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. 
#add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] primary_domain = 'py' nitpicky = False # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme_path = ['.'] # html_theme = '_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { "nosidebar": "false" } # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". #html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' git_cmd = ["git", "log", "--pretty=format:'%ad, commit %h'", "--date=local", "-n1"] html_last_updated_fmt = subprocess.Popen(git_cmd, stdout=subprocess.PIPE).communicate()[0] # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. 
htmlhelp_basename = 'Aodhdoc' # -- Options for LaTeX output ------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Aodh.tex', u'Aodh Documentation', u'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output ------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'aodh', u'Aodh Documentation', [u'OpenStack'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ----------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'Aodh', u'Aodh Documentation', u'OpenStack', 'Aodh', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # -- Options for Epub output -------------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'Aodh' epub_author = u'OpenStack' epub_publisher = u'OpenStack' epub_copyright = u'2012-2015, OpenStack' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be an ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. #epub_uid = '' # A tuple containing the cover image and cover page html template filenames. #epub_cover = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. #epub_exclude_files = [] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True aodh-2.0.6/doc/source/architecture.rst0000664000567000056710000000450313076064372021066 0ustar jenkinsjenkins00000000000000.. _architecture: ===================== System Architecture ===================== .. 
index:: single: agent; architecture double: compute agent; architecture double: collector; architecture double: data store; architecture double: database; architecture double: API; architecture High-Level Architecture ======================= Each of Aodh's services are designed to scale horizontally. Additional workers and nodes can be added depending on the expected load. It provides daemons to evaluate and notify based on defined alarming rules. Evaluating the data =================== Alarming Service ---------------- The alarming component of Aodh, first delivered in the Havana version, allows you to set alarms based on threshold evaluation for a collection of samples. An alarm can be set on a single meter, or on a combination. For example, you may want to trigger an alarm when the memory consumption reaches 70% on a given instance if the instance has been up for more than 10 min. To setup an alarm, you will call :ref:`Aodh's API server ` specifying the alarm conditions and an action to take. Of course, if you are not administrator of the cloud itself, you can only set alarms on meters for your own components. There can be multiple form of actions, but two have been implemented so far: 1. :term:`HTTP callback`: you provide a URL to be called whenever the alarm has been set off. The payload of the request contains all the details of why the alarm was triggered. 2. :term:`log`: mostly useful for debugging, stores alarms in a log file. For more details on this, we recommend that you read the blog post by Mehdi Abaakouk `Autoscaling with Heat and Ceilometer`_. Particular attention should be given to the section "Some notes about deploying alarming" as the database setup (using a separate database from the one used for metering) will be critical in all cases of production deployment. .. _Autoscaling with Heat and Ceilometer: http://techs.enovance.com/5991/autoscaling-with-heat-and-ceilometer Alarm Rules =========== .. list-plugins:: aodh.alarm.rule :detailed: Alarm Evaluators ================ .. list-plugins:: aodh.evaluator :detailed: Alarm Notifiers =============== .. list-plugins:: aodh.notifier :detailed: Alarm Storage =============== .. list-plugins:: aodh.storage :detailed: aodh-2.0.6/doc/source/contributing.rst0000664000567000056710000000232313076064371021110 0ustar jenkinsjenkins00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _contributing: ====================== Contributing to Aodh ====================== Aodh follows the same workflow as other OpenStack projects. To start contributing to Aodh, please follow the workflow found here_. .. 
_here: https://wiki.openstack.org/wiki/Gerrit_Workflow Project Hosting Details ======================= :Bug tracker: http://launchpad.net/aodh :Mailing list: http://lists.openstack.org/cgi-bin/mailman/listinfo/openstack-dev (prefix subjects with ``[Aodh]`` for faster responses) :Code Hosting: https://git.openstack.org/cgit/openstack/aodh/ :Code Review: https://review.openstack.org/#/q/status:open+project:openstack/aodh,n,z aodh-2.0.6/doc/source/glossary.rst0000664000567000056710000000302713076064372020247 0ustar jenkinsjenkins00000000000000.. Copyright 2012 New Dream Network (DreamHost) Copyright 2013 eNovance Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ========== Glossary ========== .. glossary:: alarm An action triggered whenever a meter reaches a certain threshold. API server HTTP REST API service for Aodh. ceilometer From Wikipedia [#]_: A ceilometer is a device that uses a laser or other light source to determine the height of a cloud base. http callback HTTP callback is used for calling a predefined URL, whenever an alarm has been set off. The payload of the request contains all the details of why the alarm was triggered. log Logging is one of the alarm actions that is useful mostly for debugging, it stores the alarms in a log file. project The OpenStack tenant or project. resource The OpenStack entity being metered (e.g. instance, volume, image, etc). user An OpenStack user. .. [#] http://en.wikipedia.org/wiki/Ceilometer aodh-2.0.6/doc/source/api/0000775000567000056710000000000013076064720016416 5ustar jenkinsjenkins00000000000000aodh-2.0.6/doc/source/api/index.rst0000664000567000056710000000014713076064371020263 0ustar jenkinsjenkins00000000000000=================== Source Code Index =================== .. toctree:: :maxdepth: 1 autoindex aodh-2.0.6/doc/source/testing.rst0000664000567000056710000000523013076064372020057 0ustar jenkinsjenkins00000000000000.. Copyright 2012 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================= Running the Tests ================= Aodh includes an extensive set of automated unit tests which are run through tox_. 1. Install ``tox``:: $ sudo pip install tox 2. On Ubuntu install ``mongodb`` and ``libmysqlclient-dev`` packages:: $ sudo apt-get install mongodb $ sudo apt-get install libmysqlclient-dev For Fedora20 there is no ``libmysqlclient-dev`` package, so you’ll need to install ``mariadb-devel.x86-64`` (or ``mariadb-devel.i386``) instead:: $ sudo yum install mongodb $ sudo yum install mariadb-devel.x86_64 3. 
Install the test dependencies:: $ sudo pip install -e /opt/stack/aodh[test] 4. Run the unit and code-style tests:: $ cd /opt/stack/aodh $ tox -e py27,pep8 As tox is a wrapper around testr, it also accepts the same flags as testr. See the `testr documentation`_ for details about these additional flags. .. _testr documentation: https://testrepository.readthedocs.org/en/latest/MANUAL.html Use a double hyphen to pass options to testr. For example, to run only tests under tests/api/v2:: $ tox -e py27 -- api.v2 To debug tests (ie. break into pdb debugger), you can use ''debug'' tox environment. Here's an example, passing the name of a test since you'll normally only want to run the test that hits your breakpoint:: $ tox -e debug aodh.tests.test_bin For reference, the ``debug`` tox environment implements the instructions here: https://wiki.openstack.org/wiki/Testr#Debugging_.28pdb.29_Tests 5. There is a growing suite of tests which use a tool called `gabbi`_ to test and validate the behavior of the Aodh API. These tests are run when using the usual ``py27`` tox target but if desired they can be run by themselves:: $ tox -e gabbi The YAML files used to drive the gabbi tests can be found in ``aodh/tests/gabbi/gabbits``. If you are adding to or adjusting the API you should consider adding tests here. .. _gabbi: https://gabbi.readthedocs.org/ .. seealso:: * tox_ .. _tox: http://tox.testrun.org/latest/ aodh-2.0.6/doc/source/install/0000775000567000056710000000000013076064720017313 5ustar jenkinsjenkins00000000000000aodh-2.0.6/doc/source/install/mod_wsgi.rst0000664000567000056710000000426313076064372021665 0ustar jenkinsjenkins00000000000000.. Copyright 2013 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =================================== Installing the API behind mod_wsgi =================================== Aodh comes with a few example files for configuring the API service to run behind Apache with ``mod_wsgi``. app.wsgi ======== The file ``aodh/api/app.wsgi`` sets up the V2 API WSGI application. The file is installed with the rest of the Aodh application code, and should not need to be modified. etc/apache2/aodh ================ The ``etc/apache2/aodh`` file contains example settings that work with a copy of Aodh installed via devstack. .. literalinclude:: ../../../etc/apache2/aodh 1. On deb-based systems copy or symlink the file to ``/etc/apache2/sites-available``. For rpm-based systems the file will go in ``/etc/httpd/conf.d``. 2. Modify the ``WSGIDaemonProcess`` directive to set the ``user`` and ``group`` values to a appropriate user on your server. In many installations ``aodh`` will be correct. 3. Enable the Aodh site. On deb-based systems:: $ a2ensite aodh $ service apache2 reload On rpm-based systems:: $ service httpd reload Limitation ========== As Aodh is using Pecan and Pecan's DebugMiddleware doesn't support multiple processes, there is no way to set debug mode in the multiprocessing case. 
To allow multiple processes the DebugMiddleware may be turned off by setting ``pecan_debug`` to ``False`` in the ``api`` section of ``aodh.conf``. For other WSGI setup you can refer to the `pecan deployment`_ documentation. .. _`pecan deployment`: http://pecan.readthedocs.org/en/latest/deployment.html#deployment aodh-2.0.6/doc/source/install/development.rst0000664000567000056710000000326613076064371022400 0ustar jenkinsjenkins00000000000000.. Copyright 2012 Nicolas Barcet for Canonical 2013 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. =============================== Installing development sandbox =============================== Configuring devstack ==================== .. index:: double: installing; devstack 1. Download devstack_. 2. Create a ``local.conf`` file as input to devstack. .. note:: ``local.conf`` replaces the former configuration file called ``localrc``. If you used localrc before, remove it to switch to using the new file. For further information see the `localrc description page `_ or `devstack configuration `_. 3. The aodh services are not enabled by default, so they must be enabled in ``local.conf`` before running ``stack.sh``. This example ``local.conf`` file shows all of the settings required for aodh:: [[local|localrc]] # Enable the aodh alarming services enable_plugin aodh https://git.openstack.org/openstack/aodh master .. _devstack: http://docs.openstack.org/developer/devstack aodh-2.0.6/doc/source/install/manual.rst0000664000567000056710000000671413076064372021335 0ustar jenkinsjenkins00000000000000.. Copyright 2012 Nicolas Barcet for Canonical 2013 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _installing_manually: ===================== Installing Manually ===================== Storage Backend Installation ============================ This step is a prerequisite for the collector, notification agent and API services. You may use one of the listed database backends below to store Aodh data. .. note:: Please notice, MongoDB requires pymongo_ to be installed on the system. The required minimum version of pymongo is 2.4. .. MongoDB ------- The recommended Aodh storage backend is `MongoDB`. Follow the instructions to install the MongoDB_ package for your operating system, then start the service. The required minimum version of MongoDB is 2.4. 
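If you want to confirm that a recent enough pymongo is available before configuring the backend, one quick check (a minimal sketch, assuming a standard Python environment with pymongo installed) is::

    $ python -c "import pymongo; print(pymongo.version)"
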
To use MongoDB as the storage backend, change the 'database' section in aodh.conf as follows:: [database] connection = mongodb://username:password@host:27017/aodh If MongoDB is configured in replica set mode, add `?replicaSet=` in your connection URL:: [database] connection = mongodb://username:password@host:27017/aodh?replicaSet=foobar SQLalchemy-supported DBs ------------------------ You may alternatively use `MySQL` (or any other SQLAlchemy-supported DB like `PostgreSQL`). In case of SQL-based database backends, you need to create a `aodh` database first and then initialise it by running:: aodh-dbsync To use MySQL as the storage backend, change the 'database' section in aodh.conf as follows:: [database] connection = mysql+pymysql://username:password@host/aodh?charset=utf8 HBase ----- HBase backend is implemented to use HBase Thrift interface, therefore it is mandatory to have the HBase Thrift server installed and running. To start the Thrift server, please run the following command:: ${HBASE_HOME}/bin/hbase thrift start The implementation uses `HappyBase`_, which is a wrapper library used to interact with HBase via Thrift protocol. You can verify the thrift connection by running a quick test from a client:: import happybase conn = happybase.Connection(host=$hbase-thrift-server, port=9090, table_prefix=None) print conn.tables() # this returns a list of HBase tables in your HBase server .. note:: HappyBase version 0.5 or greater is required. Additionally, version 0.7 is not currently supported. .. In case of HBase, the needed database tables (`project`, `user`, `resource`, `meter`, `alarm`, `alarm_h`) should be created manually with `f` column family for each one. To use HBase as the storage backend, change the 'database' section in aodh.conf as follows:: [database] connection = hbase://hbase-thrift-host:9090 .. _HappyBase: http://happybase.readthedocs.org/en/latest/index.html# .. _MongoDB: http://www.mongodb.org/ .. _pymongo: https://pypi.python.org/pypi/pymongo/ aodh-2.0.6/doc/source/install/index.rst0000664000567000056710000000142113076064372021155 0ustar jenkinsjenkins00000000000000.. Copyright 2013 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. .. _install: ================= Installing Aodh ================= .. toctree:: :maxdepth: 2 development manual storage mod_wsgi aodh-2.0.6/doc/source/install/storage.rst0000664000567000056710000000014313076064372021512 0ustar jenkinsjenkins00000000000000================= Storage Drivers ================= .. list-plugins:: aodh.storage :detailed: aodh-2.0.6/doc/source/_templates/0000775000567000056710000000000013076064720020002 5ustar jenkinsjenkins00000000000000aodh-2.0.6/doc/source/_templates/.placeholder0000664000567000056710000000000013076064371022255 0ustar jenkinsjenkins00000000000000aodh-2.0.6/doc/source/configuration.rst0000664000567000056710000000517313076064372021257 0ustar jenkinsjenkins00000000000000.. 
Copyright 2012 New Dream Network, LLC (DreamHost) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============= Configuration ============= Configure Aodh by editing `/etc/aodh/aodh.conf`. No config file is provided with the source code, but one can be easily created by running: :: tox -e genconfig This command will create an `etc/aodh/aodh.conf` file which can be used as a base for the default configuration file at `/etc/aodh/aodh.conf`. For the list and description of configuration options that can be set for Aodh in order to set up the services please see the `Telemetry section `_ in the OpenStack Manuals Configuration Reference. HBase =================== This storage implementation uses Thrift HBase interface. The default Thrift connection settings should be changed to support using ConnectionPool in HBase. To ensure proper configuration, please add the following lines to the `hbase-site.xml` configuration file:: hbase.thrift.minWorkerThreads 200 For pure development purposes, you can use HBase from Apache_ or some other vendor like Cloudera or Hortonworks. To verify your installation, you can use the `list` command in `HBase shell`, to list the tables in your HBase server, as follows:: $ ${HBASE_HOME}/bin/hbase shell hbase> list .. note:: This driver has been tested against HBase 0.94.2/CDH 4.2.0, HBase 0.94.4/HDP 1.2, HBase 0.94.18/Apache, HBase 0.94.5/Apache, HBase 0.96.2/Apache and HBase 0.98.0/Apache. Versions earlier than 0.92.1 are not supported due to feature incompatibility. To find out more about supported storage backends please take a look on the :doc:`install/manual/` guide. .. note:: If you are changing the configuration on the fly to use HBase, as a storage backend, you will need to restart the Aodh services that use the database to allow the changes to take affect, i.e. the collector and API services. .. _Apache: https://hbase.apache.org/book/quickstart.html aodh-2.0.6/doc/source/index.rst0000664000567000056710000000263213076064372017514 0ustar jenkinsjenkins00000000000000.. Copyright 2012 Nicolas Barcet for Canonical Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ============================================ Welcome to the Aodh developer documentation! ============================================ What is the purpose of the project and vision for it? ===================================================== * Provide alarms and notifications based on metrics. This documentation offers information on how Aodh works and how to contribute to the project. Overview ======== .. 
toctree:: :maxdepth: 2 architecture webapi/index Developer Documentation ======================= .. toctree:: :maxdepth: 2 install/index configuration testing contributing Appendix ======== .. toctree:: :maxdepth: 1 glossary api/index .. update index Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` aodh-2.0.6/rally-jobs/0000775000567000056710000000000013076064720015656 5ustar jenkinsjenkins00000000000000aodh-2.0.6/rally-jobs/ceilometer.yaml0000664000567000056710000001177613076064371020710 0ustar jenkinsjenkins00000000000000--- CeilometerAlarms.create_alarm: - args: meter_name: "ram_util" threshold: 10.0 type: "threshold" statistic: "avg" alarm_actions: ["http://localhost:8776/alarm"] ok_actions: ["http://localhost:8776/ok"] insufficient_data_actions: ["http://localhost:8776/notok"] runner: type: "constant" times: 10 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerAlarms.create_and_delete_alarm: - args: meter_name: "ram_util" threshold: 10.0 type: "threshold" statistic: "avg" alarm_actions: ["http://localhost:8776/alarm"] ok_actions: ["http://localhost:8776/ok"] insufficient_data_actions: ["http://localhost:8776/notok"] runner: type: "constant" times: 10 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerAlarms.create_and_list_alarm: - args: meter_name: "ram_util" threshold: 10.0 type: "threshold" statistic: "avg" alarm_actions: ["http://localhost:8776/alarm"] ok_actions: ["http://localhost:8776/ok"] insufficient_data_actions: ["http://localhost:8776/notok"] runner: type: "constant" times: 10 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerAlarms.create_and_update_alarm: - args: meter_name: "ram_util" threshold: 10.0 type: "threshold" statistic: "avg" alarm_actions: ["http://localhost:8776/alarm"] ok_actions: ["http://localhost:8776/ok"] insufficient_data_actions: ["http://localhost:8776/notok"] runner: type: "constant" times: 10 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerAlarms.list_alarms: - runner: type: "constant" times: 10 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerMeters.list_meters: - runner: type: "constant" times: 10 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerResource.list_resources: - runner: type: "constant" times: 10 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerStats.create_meter_and_get_stats: - args: user_id: "user-id" resource_id: "resource-id" counter_volume: 1.0 counter_unit: "" counter_type: "cumulative" runner: type: "constant" times: 20 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerQueries.create_and_query_alarms: - args: filter: {"and": [{"!=": {"state": "dummy_state"}},{"=": {"type": "threshold"}}]} orderby: !!null limit: 10 meter_name: "ram_util" threshold: 10.0 type: "threshold" statistic: "avg" alarm_actions: ["http://localhost:8776/alarm"] ok_actions: ["http://localhost:8776/ok"] insufficient_data_actions: ["http://localhost:8776/notok"] runner: type: "constant" times: 20 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerQueries.create_and_query_alarm_history: - args: orderby: !!null limit: !!null meter_name: "ram_util" threshold: 10.0 type: 
"threshold" statistic: "avg" alarm_actions: ["http://localhost:8776/alarm"] ok_actions: ["http://localhost:8776/ok"] insufficient_data_actions: ["http://localhost:8776/notok"] runner: type: "constant" times: 20 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 CeilometerQueries.create_and_query_samples: - args: filter: {"=": {"counter_unit": "instance"}} orderby: !!null limit: 10 counter_name: "cpu_util" counter_type: "gauge" counter_unit: "instance" counter_volume: "1.0" resource_id: "resource_id" runner: type: "constant" times: 20 concurrency: 10 context: users: tenants: 1 users_per_tenant: 1 sla: max_failure_percent: 0 aodh-2.0.6/rally-jobs/plugins/0000775000567000056710000000000013076064720017337 5ustar jenkinsjenkins00000000000000aodh-2.0.6/rally-jobs/plugins/plugin_sample.py0000664000567000056710000000166713076064371022564 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Sample of plugin for Aodh. For more Aodh related benchmarks take a look here: github.com/openstack/rally/blob/master/rally/benchmark/scenarios/aodh/ About plugins: https://rally.readthedocs.org/en/latest/plugins.html Rally concepts https://wiki.openstack.org/wiki/Rally/Concepts """ from rally.benchmark.scenarios import base class AodhPlugin(base.Scenario): pass aodh-2.0.6/rally-jobs/plugins/README.rst0000664000567000056710000000060613076064371021032 0ustar jenkinsjenkins00000000000000Rally plugins ============= All *.py modules from this directory will be auto-loaded by Rally and all plugins will be discoverable. There is no need of any extra configuration and there is no difference between writing them here and in rally code base. Note that it is better to push all interesting and useful benchmarks to Rally code base, this simplifies administration for Operators. aodh-2.0.6/rally-jobs/extra/0000775000567000056710000000000013076064720017001 5ustar jenkinsjenkins00000000000000aodh-2.0.6/rally-jobs/extra/fake.img0000664000567000056710000000000013076064371020375 0ustar jenkinsjenkins00000000000000aodh-2.0.6/rally-jobs/extra/README.rst0000664000567000056710000000025513076064371020474 0ustar jenkinsjenkins00000000000000Extra files =========== All files from this directory will be copy pasted to gates, so you are able to use absolute path in rally tasks. Files will be in ~/.rally/extra/* aodh-2.0.6/rally-jobs/README.rst0000664000567000056710000000155413076064371017354 0ustar jenkinsjenkins00000000000000Rally job related files ======================= This directory contains rally tasks and plugins that are run by OpenStack CI. Structure --------- * plugins - directory where you can add rally plugins. Almost everything in Rally is a plugin. Benchmark context, Benchmark scenario, SLA checks, Generic cleanup resources, .... * extra - all files from this directory will be copy pasted to gates, so you are able to use absolute paths in rally tasks. 
Files will be located in ~/.rally/extra/* * aodh is a task that is run in gates against aodh Useful links ------------ * More about Rally: https://rally.readthedocs.org/en/latest/ * How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html * About plugins: https://rally.readthedocs.org/en/latest/plugins.html * Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins aodh-2.0.6/tools/0000775000567000056710000000000013076064720014740 5ustar jenkinsjenkins00000000000000aodh-2.0.6/tools/pretty_tox.sh0000775000567000056710000000065213076064371017525 0ustar jenkinsjenkins00000000000000#!/usr/bin/env bash set -o pipefail TESTRARGS=$1 # --until-failure is not compatible with --subunit see: # # https://bugs.launchpad.net/testrepository/+bug/1411804 # # this work around exists until that is addressed if [[ "$TESTARGS" =~ "until-failure" ]]; then python setup.py testr --slowest --testr-args="$TESTRARGS" else python setup.py testr --slowest --testr-args="--subunit $TESTRARGS" | subunit-trace -f fi aodh-2.0.6/tools/test_hbase_table_utils.py0000664000567000056710000000237113076064372022030 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys from oslo_config import cfg from aodh import storage def main(argv): cfg.CONF([], project='aodh') if os.getenv("AODH_TEST_STORAGE_URL"): url = ("%s?table_prefix=%s" % (os.getenv("AODH_TEST_STORAGE_URL"), os.getenv("AODH_TEST_HBASE_TABLE_PREFIX", "test"))) cfg.CONF.set_override("connection", url, group="database", enforce_type=True) alarm_conn = storage.get_connection_from_config(cfg.CONF) for arg in argv: if arg == "--upgrade": alarm_conn.upgrade() if arg == "--clear": alarm_conn.clear() if __name__ == '__main__': main(sys.argv[1:]) aodh-2.0.6/functions.sh0000664000567000056710000000067113076064372016153 0ustar jenkinsjenkins00000000000000function clean_exit(){ local error_code="$?" rm -rf "$1" kill $(jobs -p) return $error_code } check_for_cmd () { if ! which "$1" >/dev/null 2>&1 then echo "Could not find $1 command" 1>&2 exit 1 fi } wait_for_line () { while read line do echo "$line" | grep -q "$1" && break done < "$2" # Read the fifo for ever otherwise process would block cat "$2" >/dev/null & } aodh-2.0.6/aodh/0000775000567000056710000000000013076064720014513 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/0000775000567000056710000000000013076064720015655 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/constants.py0000664000567000056710000000123513076064371020246 0ustar jenkinsjenkins00000000000000# Copyright 2014 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import datetime MIN_DATETIME = datetime.datetime(datetime.MINYEAR, 1, 1) aodh-2.0.6/aodh/tests/tempest/0000775000567000056710000000000013076064720017336 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/tempest/api/0000775000567000056710000000000013076064720020107 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/tempest/api/test_alarming_api.py0000664000567000056710000001207513076064372024153 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest.common.utils import data_utils from tempest.lib import exceptions as lib_exc from tempest import test from aodh.tests.tempest.api import base class TelemetryAlarmingAPITest(base.BaseAlarmingTest): @classmethod def resource_setup(cls): super(TelemetryAlarmingAPITest, cls).resource_setup() cls.rule = {'meter_name': 'cpu_util', 'comparison_operator': 'gt', 'threshold': 80.0, 'period': 70} for i in range(2): cls.create_alarm(threshold_rule=cls.rule) @test.idempotent_id('1c918e06-210b-41eb-bd45-14676dd77cd7') def test_alarm_list(self): # List alarms alarm_list = self.alarming_client.list_alarms() # Verify created alarm in the list fetched_ids = [a['alarm_id'] for a in alarm_list] missing_alarms = [a for a in self.alarm_ids if a not in fetched_ids] self.assertEqual(0, len(missing_alarms), "Failed to find the following created alarm(s)" " in a fetched list: %s" % ', '.join(str(a) for a in missing_alarms)) @test.idempotent_id('1297b095-39c1-4e74-8a1f-4ae998cedd68') def test_create_update_get_delete_alarm(self): # Create an alarm alarm_name = data_utils.rand_name('telemetry_alarm') body = self.alarming_client.create_alarm( name=alarm_name, type='threshold', threshold_rule=self.rule) self.assertEqual(alarm_name, body['name']) alarm_id = body['alarm_id'] self.assertDictContainsSubset(self.rule, body['threshold_rule']) # Update alarm with new rule and new name new_rule = {'meter_name': 'cpu', 'comparison_operator': 'eq', 'threshold': 70.0, 'period': 60} alarm_name_updated = data_utils.rand_name('telemetry-alarm-update') body = self.alarming_client.update_alarm( alarm_id, threshold_rule=new_rule, name=alarm_name_updated, type='threshold') self.assertEqual(alarm_name_updated, body['name']) self.assertDictContainsSubset(new_rule, body['threshold_rule']) # Get and verify details of an alarm after update body = self.alarming_client.show_alarm(alarm_id) self.assertEqual(alarm_name_updated, body['name']) self.assertDictContainsSubset(new_rule, body['threshold_rule']) # Get history for the alarm and verify the same body = self.alarming_client.show_alarm_history(alarm_id) self.assertEqual("rule change", body[0]['type']) self.assertIn(alarm_name_updated, body[0]['detail']) self.assertEqual("creation", body[1]['type']) self.assertIn(alarm_name, body[1]['detail']) # Delete alarm and verify if deleted self.alarming_client.delete_alarm(alarm_id) self.assertRaises(lib_exc.NotFound, self.alarming_client.show_alarm, alarm_id) 
@test.idempotent_id('aca49486-70bb-4016-87e0-f6131374f742') def test_set_get_alarm_state(self): alarm_states = ['ok', 'alarm', 'insufficient data'] alarm = self.create_alarm(threshold_rule=self.rule) # Set alarm state and verify new_state =\ [elem for elem in alarm_states if elem != alarm['state']][0] state = self.alarming_client.alarm_set_state(alarm['alarm_id'], new_state) self.assertEqual(new_state, state.data) # Get alarm state and verify state = self.alarming_client.show_alarm_state(alarm['alarm_id']) self.assertEqual(new_state, state.data) @test.idempotent_id('08d7e45a-1344-4e5c-ba6f-f6cbb77f55ba') def test_create_delete_alarm_with_combination_rule(self): rule = {"alarm_ids": self.alarm_ids, "operator": "or"} # Verifies alarm create alarm_name = data_utils.rand_name('combination_alarm') body = self.alarming_client.create_alarm(name=alarm_name, combination_rule=rule, type='combination') self.assertEqual(alarm_name, body['name']) alarm_id = body['alarm_id'] self.assertDictContainsSubset(rule, body['combination_rule']) # Verify alarm delete self.alarming_client.delete_alarm(alarm_id) self.assertRaises(lib_exc.NotFound, self.alarming_client.show_alarm, alarm_id) aodh-2.0.6/aodh/tests/tempest/api/test_alarming_api_negative.py0000664000567000056710000000553113076064372026034 0ustar jenkinsjenkins00000000000000# Copyright 2015 GlobalLogic. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import uuid from tempest.common.utils import data_utils from tempest.lib import exceptions as lib_exc from tempest import test from aodh.tests.tempest.api import base class TelemetryAlarmingNegativeTest(base.BaseAlarmingTest): """Negative tests for show_alarm, update_alarm, show_alarm_history tests ** show non-existent alarm ** show the deleted alarm ** delete deleted alarm ** update deleted alarm """ @test.attr(type=['negative']) @test.idempotent_id('668743d5-08ad-4480-b2b8-15da34f81e7e') def test_get_non_existent_alarm(self): # get the non-existent alarm non_existent_id = str(uuid.uuid4()) self.assertRaises(lib_exc.NotFound, self.alarming_client.show_alarm, non_existent_id) @test.attr(type=['negative']) @test.idempotent_id('ef45000d-0a72-4781-866d-4cb7bf2582ae') def test_get_update_show_history_delete_deleted_alarm(self): # get, update and delete the deleted alarm alarm_name = data_utils.rand_name('telemetry_alarm') rule = {'meter_name': 'cpu', 'comparison_operator': 'eq', 'threshold': 100.0, 'period': 90} body = self.alarming_client.create_alarm( name=alarm_name, type='threshold', threshold_rule=rule) alarm_id = body['alarm_id'] self.alarming_client.delete_alarm(alarm_id) # get the deleted alarm self.assertRaises(lib_exc.NotFound, self.alarming_client.show_alarm, alarm_id) # update the deleted alarm updated_alarm_name = data_utils.rand_name('telemetry_alarm_updated') updated_rule = {'meter_name': 'cpu_new', 'comparison_operator': 'eq', 'threshold': 70, 'period': 50} self.assertRaises(lib_exc.NotFound, self.alarming_client.update_alarm, alarm_id, threshold_rule=updated_rule, name=updated_alarm_name, type='threshold') # delete the deleted alarm self.assertRaises(lib_exc.NotFound, self.alarming_client.delete_alarm, alarm_id) aodh-2.0.6/aodh/tests/tempest/api/__init__.py0000664000567000056710000000000013076064371022210 0ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/tempest/api/base.py0000664000567000056710000000410513076064372021376 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from tempest.common.utils import data_utils from tempest import config from tempest.lib import exceptions as lib_exc import tempest.test from aodh.tests.tempest.service import client CONF = config.CONF class BaseAlarmingTest(tempest.test.BaseTestCase): """Base test case class for all Alarming API tests.""" credentials = ['primary'] client_manager = client.Manager @classmethod def skip_checks(cls): super(BaseAlarmingTest, cls).skip_checks() if not CONF.service_available.aodh_plugin: raise cls.skipException("Aodh support is required") @classmethod def setup_clients(cls): super(BaseAlarmingTest, cls).setup_clients() cls.alarming_client = cls.os.alarming_client @classmethod def resource_setup(cls): super(BaseAlarmingTest, cls).resource_setup() cls.alarm_ids = [] @classmethod def create_alarm(cls, **kwargs): body = cls.alarming_client.create_alarm( name=data_utils.rand_name('telemetry_alarm'), type='threshold', **kwargs) cls.alarm_ids.append(body['alarm_id']) return body @staticmethod def cleanup_resources(method, list_of_ids): for resource_id in list_of_ids: try: method(resource_id) except lib_exc.NotFound: pass @classmethod def resource_cleanup(cls): cls.cleanup_resources(cls.alarming_client.delete_alarm, cls.alarm_ids) super(BaseAlarmingTest, cls).resource_cleanup() aodh-2.0.6/aodh/tests/tempest/config.py0000664000567000056710000000265613076064372021171 0ustar jenkinsjenkins00000000000000# # Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg service_available_group = cfg.OptGroup(name="service_available", title="Available OpenStack Services") ServiceAvailableGroup = [ cfg.BoolOpt("aodh_plugin", default=True, help="Whether or not Aodh is expected to be available"), ] alarming_group = cfg.OptGroup(name='alarming_plugin', title='Alarming Service Options') AlarmingGroup = [ cfg.StrOpt('catalog_type', default='alarming', help="Catalog type of the Alarming service."), cfg.StrOpt('endpoint_type', default='publicURL', choices=['public', 'admin', 'internal', 'publicURL', 'adminURL', 'internalURL'], help="The endpoint type to use for the alarming service."), ] aodh-2.0.6/aodh/tests/tempest/__init__.py0000664000567000056710000000000013076064371021437 0ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/tempest/plugin.py0000664000567000056710000000304113076064372021207 0ustar jenkinsjenkins00000000000000# # Copyright 2015 NEC Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from tempest import config from tempest.test_discover import plugins import aodh from aodh.tests.tempest import config as tempest_config class AodhTempestPlugin(plugins.TempestPlugin): def load_tests(self): base_path = os.path.split(os.path.dirname( os.path.abspath(aodh.__file__)))[0] test_dir = "aodh/tests/tempest" full_test_dir = os.path.join(base_path, test_dir) return full_test_dir, base_path def register_opts(self, conf): config.register_opt_group(conf, tempest_config.service_available_group, tempest_config.ServiceAvailableGroup) config.register_opt_group(conf, tempest_config.alarming_group, tempest_config.AlarmingGroup) def get_opt_lists(self): return [(tempest_config.alarming_group.name, tempest_config.AlarmingGroup)] aodh-2.0.6/aodh/tests/tempest/service/0000775000567000056710000000000013076064720020776 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/tempest/service/client.py0000664000567000056710000001064413076064371022635 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils as json from six.moves.urllib import parse as urllib from tempest import config from tempest.lib.common import rest_client from tempest import manager CONF = config.CONF class AlarmingClient(rest_client.RestClient): version = '2' uri_prefix = "v2" def deserialize(self, body): return json.loads(body.replace("\n", "")) def serialize(self, body): return json.dumps(body) def list_alarms(self, query=None): uri = '%s/alarms' % self.uri_prefix uri_dict = {} if query: uri_dict = {'q.field': query[0], 'q.op': query[1], 'q.value': query[2]} if uri_dict: uri += "?%s" % urllib.urlencode(uri_dict) resp, body = self.get(uri) self.expected_success(200, resp.status) body = self.deserialize(body) return rest_client.ResponseBodyList(resp, body) def show_alarm(self, alarm_id): uri = '%s/alarms/%s' % (self.uri_prefix, alarm_id) resp, body = self.get(uri) self.expected_success(200, resp.status) body = self.deserialize(body) return rest_client.ResponseBody(resp, body) def show_alarm_history(self, alarm_id): uri = "%s/alarms/%s/history" % (self.uri_prefix, alarm_id) resp, body = self.get(uri) self.expected_success(200, resp.status) body = self.deserialize(body) return rest_client.ResponseBodyList(resp, body) def delete_alarm(self, alarm_id): uri = "%s/alarms/%s" % (self.uri_prefix, alarm_id) resp, body = self.delete(uri) self.expected_success(204, resp.status) if body: body = self.deserialize(body) return rest_client.ResponseBody(resp, body) def create_alarm(self, **kwargs): uri = "%s/alarms" % self.uri_prefix body = self.serialize(kwargs) resp, body = self.post(uri, body) self.expected_success(201, resp.status) body = self.deserialize(body) return rest_client.ResponseBody(resp, body) def update_alarm(self, alarm_id, **kwargs): uri = "%s/alarms/%s" % (self.uri_prefix, alarm_id) body = self.serialize(kwargs) resp, body = self.put(uri, body) self.expected_success(200, resp.status) body = 
self.deserialize(body) return rest_client.ResponseBody(resp, body) def show_alarm_state(self, alarm_id): uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id) resp, body = self.get(uri) self.expected_success(200, resp.status) body = self.deserialize(body) return rest_client.ResponseBodyData(resp, body) def alarm_set_state(self, alarm_id, state): uri = "%s/alarms/%s/state" % (self.uri_prefix, alarm_id) body = self.serialize(state) resp, body = self.put(uri, body) self.expected_success(200, resp.status) body = self.deserialize(body) return rest_client.ResponseBodyData(resp, body) class Manager(manager.Manager): default_params = { 'disable_ssl_certificate_validation': CONF.identity.disable_ssl_certificate_validation, 'ca_certs': CONF.identity.ca_certificates_file, 'trace_requests': CONF.debug.trace_requests } alarming_params = { 'service': CONF.alarming_plugin.catalog_type, 'region': CONF.identity.region, 'endpoint_type': CONF.alarming_plugin.endpoint_type, } alarming_params.update(default_params) def __init__(self, credentials=None, service=None): super(Manager, self).__init__(credentials) self.set_alarming_client() def set_alarming_client(self): self.alarming_client = AlarmingClient(self.auth_provider, **self.alarming_params) aodh-2.0.6/aodh/tests/tempest/service/__init__.py0000664000567000056710000000000013076064371023077 0ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/policy.json-pre-mikita0000664000567000056710000000015013076064372022106 0ustar jenkinsjenkins00000000000000{ "context_is_admin": "role:admin", "segregation": "rule:context_is_admin", "default": "" } aodh-2.0.6/aodh/tests/open-policy.json0000664000567000056710000000015013076064371021004 0ustar jenkinsjenkins00000000000000{ "context_is_admin": "role:admin", "segregation": "rule:context_is_admin", "default": "" } aodh-2.0.6/aodh/tests/test_hacking.py0000664000567000056710000000737613076064372020712 0ustar jenkinsjenkins00000000000000# Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import textwrap import mock import pep8 from testtools import testcase from aodh.hacking import checks class HackingTestCase(testcase.TestCase): """Test cases for aodh specific hacking rules. This class tests the hacking checks in aodh.hacking.checks by passing strings to the check methods like the pep8/flake8 parser would. The parser loops over each line in the file and then passes the parameters to the check method. The parameter names in the check method dictate what type of object is passed to the check method. The parameter types are:: logical_line: A processed line with the following modifications: - Multi-line statements converted to a single line. - Stripped left and right. - Contents of strings replaced with "xxx" of same length. - Comments removed. physical_line: Raw line of text from the input file. 
lines: a list of the raw lines from the input file tokens: the tokens that contribute to this logical line line_number: line number in the input file total_lines: number of lines in the input file blank_lines: blank lines before this one indent_char: indentation character in this file (" " or "\t") indent_level: indentation (with tabs expanded to multiples of 8) previous_indent_level: indentation on previous line previous_logical: previous logical line filename: Path of the file being run through pep8 When running a test on a check method the return will be False/None if there is no violation in the sample input. If there is an error a tuple is returned with a position in the line, and a message. So to check the result just assertTrue if the check is expected to fail and assertFalse if it should pass. """ # We are patching pep8 so that only the check under test is actually # installed. @mock.patch('pep8._checks', {'physical_line': {}, 'logical_line': {}, 'tree': {}}) def _run_check(self, code, checker, filename=None): pep8.register_check(checker) lines = textwrap.dedent(code).strip().splitlines(True) checker = pep8.Checker(filename=filename, lines=lines) checker.check_all() checker.report._deferred_print.sort() return checker.report._deferred_print def _assert_has_errors(self, code, checker, expected_errors=None, filename=None): actual_errors = [e[:3] for e in self._run_check(code, checker, filename)] self.assertEqual(expected_errors or [], actual_errors) def test_oslo_namespace_imports_check(self): codes = [ "from oslo.config import cfg", "import oslo.i18n", "from oslo.utils import timeutils", "from oslo.serialization import jsonutils", ] for code in codes: self._assert_has_errors(code, checks.check_oslo_namespace_imports, expected_errors=[(1, 0, "C300")]) self._assert_has_errors( code, checks.check_oslo_namespace_imports, filename="aodh/openstack/common/xyz.py") aodh-2.0.6/aodh/tests/mocks.py0000664000567000056710000000650313076064372017352 0ustar jenkinsjenkins00000000000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import happybase class MockHBaseTable(happybase.Table): def __init__(self, name, connection, data_prefix): # data_prefix is added to all rows which are written # in this test. 
It allows to divide data from different tests self.data_prefix = data_prefix # We create happybase Table with prefix from # AODH_TEST_HBASE_TABLE_PREFIX prefix = os.getenv("AODH_TEST_HBASE_TABLE_PREFIX", 'test') super(MockHBaseTable, self).__init__( "%s_%s" % (prefix, name), connection) def put(self, row, *args, **kwargs): row = self.data_prefix + row return super(MockHBaseTable, self).put(row, *args, **kwargs) def scan(self, row_start=None, row_stop=None, row_prefix=None, columns=None, filter=None, timestamp=None, include_timestamp=False, batch_size=10, scan_batching=None, limit=None, sorted_columns=False): # Add data prefix for row parameters # row_prefix could not be combined with row_start or row_stop if not row_start and not row_stop: row_prefix = self.data_prefix + (row_prefix or "") row_start = None row_stop = None elif row_start and not row_stop: # Adding data_prefix to row_start and row_stop does not work # if it looks like row_start = %data_prefix%foo, # row_stop = %data_prefix, because row_start > row_stop filter = self._update_filter_row(filter) row_start = self.data_prefix + row_start else: row_start = self.data_prefix + (row_start or "") row_stop = self.data_prefix + (row_stop or "") gen = super(MockHBaseTable, self).scan(row_start, row_stop, row_prefix, columns, filter, timestamp, include_timestamp, batch_size, scan_batching, limit, sorted_columns) data_prefix_len = len(self.data_prefix) # Restore original row format for row, data in gen: yield (row[data_prefix_len:], data) def row(self, row, *args, **kwargs): row = self.data_prefix + row return super(MockHBaseTable, self).row(row, *args, **kwargs) def delete(self, row, *args, **kwargs): row = self.data_prefix + row return super(MockHBaseTable, self).delete(row, *args, **kwargs) def _update_filter_row(self, filter): if filter: return "PrefixFilter(%s) AND %s" % (self.data_prefix, filter) else: return "PrefixFilter(%s)" % self.data_prefix aodh-2.0.6/aodh/tests/unit/0000775000567000056710000000000013076064720016634 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/unit/test_coordination.py0000664000567000056710000002311313076064372022740 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_config import fixture as fixture_config import tooz.coordination from aodh import coordination from aodh import service from aodh.tests import base class MockToozCoordinator(object): def __init__(self, member_id, shared_storage): self._member_id = member_id self._groups = shared_storage self.is_started = False def start(self): self.is_started = True def stop(self): pass def heartbeat(self): pass def create_group(self, group_id): if group_id in self._groups: return MockAsyncError( tooz.coordination.GroupAlreadyExist(group_id)) self._groups[group_id] = {} return MockAsyncResult(None) def join_group(self, group_id, capabilities=b''): if group_id not in self._groups: return MockAsyncError( tooz.coordination.GroupNotCreated(group_id)) if self._member_id in self._groups[group_id]: return MockAsyncError( tooz.coordination.MemberAlreadyExist(group_id, self._member_id)) self._groups[group_id][self._member_id] = { "capabilities": capabilities, } return MockAsyncResult(None) def leave_group(self, group_id): return MockAsyncResult(None) def get_members(self, group_id): if group_id not in self._groups: return MockAsyncError( tooz.coordination.GroupNotCreated(group_id)) return MockAsyncResult(self._groups[group_id]) class MockToozCoordExceptionRaiser(MockToozCoordinator): def start(self): raise tooz.coordination.ToozError('error') def heartbeat(self): raise tooz.coordination.ToozError('error') def join_group(self, group_id, capabilities=b''): raise tooz.coordination.ToozError('error') def get_members(self, group_id): raise tooz.coordination.ToozError('error') class MockAsyncResult(tooz.coordination.CoordAsyncResult): def __init__(self, result): self.result = result def get(self, timeout=0): return self.result @staticmethod def done(): return True class MockAsyncError(tooz.coordination.CoordAsyncResult): def __init__(self, error): self.error = error def get(self, timeout=0): raise self.error @staticmethod def done(): return True class TestHashRing(base.BaseTestCase): def test_hash_ring(self): num_nodes = 10 num_keys = 1000 nodes = [str(x) for x in range(num_nodes)] hr = coordination.HashRing(nodes) buckets = [0] * num_nodes assignments = [-1] * num_keys for k in range(num_keys): n = int(hr.get_node(str(k))) self.assertTrue(0 <= n <= num_nodes) buckets[n] += 1 assignments[k] = n # at least something in each bucket self.assertTrue(all((c > 0 for c in buckets))) # approximately even distribution diff = max(buckets) - min(buckets) self.assertTrue(diff < 0.3 * (num_keys / num_nodes)) # consistency num_nodes += 1 nodes.append(str(num_nodes + 1)) hr = coordination.HashRing(nodes) for k in range(num_keys): n = int(hr.get_node(str(k))) assignments[k] -= n reassigned = len([c for c in assignments if c != 0]) self.assertTrue(reassigned < num_keys / num_nodes) class TestPartitioning(base.BaseTestCase): def setUp(self): super(TestPartitioning, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf self.shared_storage = {} def _get_new_started_coordinator(self, shared_storage, agent_id=None, coordinator_cls=None): coordinator_cls = coordinator_cls or MockToozCoordinator self.CONF.set_override('backend_url', 'xxx://yyy', group='coordination', enforce_type=True) with mock.patch('tooz.coordination.get_coordinator', lambda _, member_id: coordinator_cls(member_id, shared_storage)): pc = coordination.PartitionCoordinator(self.CONF, agent_id) pc.start() return pc def _usage_simulation(self, *agents_kwargs): 
partition_coordinators = [] for kwargs in agents_kwargs: partition_coordinator = self._get_new_started_coordinator( self.shared_storage, kwargs['agent_id'], kwargs.get( 'coordinator_cls')) partition_coordinator.join_group(kwargs['group_id']) partition_coordinators.append(partition_coordinator) for i, kwargs in enumerate(agents_kwargs): all_resources = kwargs.get('all_resources', []) expected_resources = kwargs.get('expected_resources', []) actual_resources = partition_coordinators[i].extract_my_subset( kwargs['group_id'], all_resources) self.assertEqual(expected_resources, actual_resources) def test_single_group(self): agents = [dict(agent_id='agent1', group_id='group'), dict(agent_id='agent2', group_id='group')] self._usage_simulation(*agents) self.assertEqual(['group'], sorted(self.shared_storage.keys())) self.assertEqual(['agent1', 'agent2'], sorted(self.shared_storage['group'].keys())) def test_multiple_groups(self): agents = [dict(agent_id='agent1', group_id='group1'), dict(agent_id='agent2', group_id='group2')] self._usage_simulation(*agents) self.assertEqual(['group1', 'group2'], sorted(self.shared_storage.keys())) def test_partitioning(self): all_resources = ['resource_%s' % i for i in range(1000)] agents = ['agent_%s' % i for i in range(10)] expected_resources = [list() for _ in range(len(agents))] hr = coordination.HashRing(agents) for r in all_resources: key = agents.index(hr.get_node(r)) expected_resources[key].append(r) agents_kwargs = [] for i, agent in enumerate(agents): agents_kwargs.append(dict(agent_id=agent, group_id='group', all_resources=all_resources, expected_resources=expected_resources[i])) self._usage_simulation(*agents_kwargs) @mock.patch.object(coordination.LOG, 'exception') def test_coordination_backend_offline(self, mocked_exception): agents = [dict(agent_id='agent1', group_id='group', all_resources=['res1', 'res2'], expected_resources=[], coordinator_cls=MockToozCoordExceptionRaiser)] self._usage_simulation(*agents) called = [mock.call(u'Error connecting to coordination backend.'), mock.call(u'Error getting group membership info from ' u'coordination backend.')] self.assertEqual(called, mocked_exception.call_args_list) @mock.patch.object(coordination.LOG, 'exception') @mock.patch.object(coordination.LOG, 'info') def test_reconnect(self, mock_info, mocked_exception): coord = self._get_new_started_coordinator({}, 'a', MockToozCoordExceptionRaiser) with mock.patch('tooz.coordination.get_coordinator', return_value=MockToozCoordExceptionRaiser('a', {})): coord.heartbeat() called = [mock.call(u'Error connecting to coordination backend.'), mock.call(u'Error connecting to coordination backend.'), mock.call(u'Error sending a heartbeat to coordination ' u'backend.')] self.assertEqual(called, mocked_exception.call_args_list) with mock.patch('tooz.coordination.get_coordinator', return_value=MockToozCoordinator('a', {})): coord.heartbeat() mock_info.assert_called_with(u'Coordination backend started ' u'successfully.') def test_group_id_none(self): coord = self._get_new_started_coordinator({}, 'a') self.assertTrue(coord._coordinator.is_started) with mock.patch.object(coord._coordinator, 'join_group') as mocked: coord.join_group(None) self.assertEqual(0, mocked.call_count) with mock.patch.object(coord._coordinator, 'leave_group') as mocked: coord.leave_group(None) self.assertEqual(0, mocked.call_count) def test_stop(self): coord = self._get_new_started_coordinator({}, 'a') self.assertTrue(coord._coordinator.is_started) coord.join_group("123") coord.stop() 
self.assertIsEmpty(coord._groups) self.assertIsNone(coord._coordinator) aodh-2.0.6/aodh/tests/unit/test_api_v2_capabilities.py0000664000567000056710000000441713076064371024146 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslotest import base from aodh.api.controllers.v2 import capabilities class TestCapabilities(base.BaseTestCase): def test_recursive_keypairs(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B'}} pairs = list(capabilities._recursive_keypairs(data)) self.assertEqual([('a', 'A'), ('b', 'B'), ('nested:a', 'A'), ('nested:b', 'B')], pairs) def test_recursive_keypairs_with_separator(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', }, } separator = '.' pairs = list(capabilities._recursive_keypairs(data, separator)) self.assertEqual([('a', 'A'), ('b', 'B'), ('nested.a', 'A'), ('nested.b', 'B')], pairs) def test_recursive_keypairs_with_list_of_dict(self): small = 1 big = 1 << 64 expected = [('a', 'A'), ('b', 'B'), ('nested:list', [{small: 99, big: 42}])] data = {'a': 'A', 'b': 'B', 'nested': {'list': [{small: 99, big: 42}]}} pairs = list(capabilities._recursive_keypairs(data)) self.assertEqual(len(expected), len(pairs)) for k, v in pairs: # the keys 1 and 1<<64 cause a hash collision on 64bit platforms if k == 'nested:list': self.assertIn(v, [[{small: 99, big: 42}], [{big: 42, small: 99}]]) else: self.assertIn((k, v), expected) aodh-2.0.6/aodh/tests/unit/test_event.py0000664000567000056710000000343613076064372021377 0ustar jenkinsjenkins00000000000000# # Copyright 2015 NEC Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_config import fixture as fixture_config from oslo_messaging import server from aodh import event from aodh import service from aodh.tests import base as tests_base class TestEventAlarmEvaluationService(tests_base.BaseTestCase): def setUp(self): super(TestEventAlarmEvaluationService, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf self.storage_conn = mock.MagicMock() self.setup_messaging(self.CONF) with mock.patch('aodh.storage.get_connection_from_config', return_value=self.storage_conn): self.service = event.EventAlarmEvaluationService(self.CONF) def test_start_and_stop_service(self): self.service.start() self.assertIsInstance(self.service.listener, server.MessageHandlingServer) self.service.stop() def test_listener_start_called(self): listener = mock.Mock() with mock.patch('aodh.messaging.get_notification_listener', return_value=listener): self.service.start() self.assertTrue(listener.start.called) aodh-2.0.6/aodh/tests/unit/test_evaluator.py0000664000567000056710000001426213076064372022257 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for aodh.evaluator.AlarmEvaluationService. """ import mock from oslo_config import fixture as fixture_config from stevedore import extension from aodh import evaluator from aodh import service from aodh.tests import base as tests_base class TestAlarmEvaluationService(tests_base.BaseTestCase): def setUp(self): super(TestAlarmEvaluationService, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf self.setup_messaging(self.CONF) self.threshold_eval = mock.Mock() self.evaluators = extension.ExtensionManager.make_test_instance( [ extension.Extension( 'threshold', None, None, self.threshold_eval), ] ) self.storage_conn = mock.MagicMock() self.svc = evaluator.AlarmEvaluationService(self.CONF) self.svc.tg = mock.Mock() self.svc.partition_coordinator = mock.MagicMock() p_coord = self.svc.partition_coordinator p_coord.extract_my_subset.side_effect = lambda _, x: x self.svc.evaluators = self.evaluators self.svc.supported_evaluators = ['threshold'] def _do_test_start(self, test_interval=120, coordination_heartbeat=1.0, coordination_active=False): self.CONF.set_override('evaluation_interval', test_interval) self.CONF.set_override('heartbeat', coordination_heartbeat, group='coordination', enforce_type=True) with mock.patch('aodh.storage.get_connection_from_config', return_value=self.storage_conn): p_coord_mock = self.svc.partition_coordinator p_coord_mock.is_active.return_value = coordination_active self.svc.start() self.svc.partition_coordinator.start.assert_called_once_with() self.svc.partition_coordinator.join_group.assert_called_once_with( self.svc.PARTITIONING_GROUP_NAME) initial_delay = test_interval if coordination_active else None expected = [ mock.call(test_interval, self.svc._evaluate_assigned_alarms, 
initial_delay=initial_delay), mock.call(604800, mock.ANY), ] if coordination_active: hb_interval = min(coordination_heartbeat, test_interval / 4) hb_call = mock.call(hb_interval, self.svc.partition_coordinator.heartbeat) expected.insert(1, hb_call) actual = self.svc.tg.add_timer.call_args_list self.assertEqual(expected, actual) def test_start_singleton(self): self._do_test_start(coordination_active=False) def test_start_coordinated(self): self._do_test_start(coordination_active=True) def test_start_coordinated_high_hb_interval(self): self._do_test_start(coordination_active=True, test_interval=10, coordination_heartbeat=5) def test_evaluation_cycle(self): alarm = mock.Mock(type='threshold', alarm_id="alarm_id1") self.storage_conn.get_alarms.return_value = [alarm] with mock.patch('aodh.storage.get_connection_from_config', return_value=self.storage_conn): p_coord_mock = self.svc.partition_coordinator p_coord_mock.extract_my_subset.return_value = [alarm] self.svc._evaluate_assigned_alarms() p_coord_mock.extract_my_subset.assert_called_once_with( self.svc.PARTITIONING_GROUP_NAME, ["alarm_id1"]) self.threshold_eval.evaluate.assert_called_once_with(alarm) def test_evaluation_cycle_with_bad_alarm(self): alarms = [ mock.Mock(type='threshold', name='bad'), mock.Mock(type='threshold', name='good'), ] self.storage_conn.get_alarms.return_value = alarms self.threshold_eval.evaluate.side_effect = [Exception('Boom!'), None] with mock.patch('aodh.storage.get_connection_from_config', return_value=self.storage_conn): p_coord_mock = self.svc.partition_coordinator p_coord_mock.extract_my_subset.return_value = alarms self.svc._evaluate_assigned_alarms() self.assertEqual([mock.call(alarms[0]), mock.call(alarms[1])], self.threshold_eval.evaluate.call_args_list) def test_unknown_extension_skipped(self): alarms = [ mock.Mock(type='not_existing_type'), mock.Mock(type='threshold') ] self.storage_conn.get_alarms.return_value = alarms with mock.patch('aodh.storage.get_connection_from_config', return_value=self.storage_conn): self.svc.start() self.svc._evaluate_assigned_alarms() self.threshold_eval.evaluate.assert_called_once_with(alarms[1]) def test_check_alarm_query_constraints(self): self.storage_conn.get_alarms.return_value = [] with mock.patch('aodh.storage.get_connection_from_config', return_value=self.storage_conn): self.svc.start() self.svc._evaluate_assigned_alarms() expected = [({'enabled': True, 'exclude': {'type': 'event'}},)] self.assertEqual(expected, self.storage_conn.get_alarms.call_args_list) aodh-2.0.6/aodh/tests/unit/test_messaging.py0000664000567000056710000000515413076064372022232 0ustar jenkinsjenkins00000000000000# Copyright (C) 2014 eNovance SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import fixture as fixture_config import oslo_messaging.conffixture from oslotest import base from aodh import messaging class MessagingTests(base.BaseTestCase): def setUp(self): super(MessagingTests, self).setUp() self.CONF = self.useFixture(fixture_config.Config()).conf self.useFixture(oslo_messaging.conffixture.ConfFixture(self.CONF)) def test_get_transport_invalid_url(self): self.assertRaises(oslo_messaging.InvalidTransportURL, messaging.get_transport, self.CONF, "notvalid!") def test_get_transport_url_caching(self): t1 = messaging.get_transport(self.CONF, 'fake://') t2 = messaging.get_transport(self.CONF, 'fake://') self.assertEqual(t1, t2) def test_get_transport_default_url_caching(self): t1 = messaging.get_transport(self.CONF, ) t2 = messaging.get_transport(self.CONF, ) self.assertEqual(t1, t2) def test_get_transport_default_url_no_caching(self): t1 = messaging.get_transport(self.CONF, cache=False) t2 = messaging.get_transport(self.CONF, cache=False) self.assertNotEqual(t1, t2) def test_get_transport_url_no_caching(self): t1 = messaging.get_transport(self.CONF, 'fake://', cache=False) t2 = messaging.get_transport(self.CONF, 'fake://', cache=False) self.assertNotEqual(t1, t2) def test_get_transport_default_url_caching_mix(self): t1 = messaging.get_transport(self.CONF, ) t2 = messaging.get_transport(self.CONF, cache=False) self.assertNotEqual(t1, t2) def test_get_transport_url_caching_mix(self): t1 = messaging.get_transport(self.CONF, 'fake://') t2 = messaging.get_transport(self.CONF, 'fake://', cache=False) self.assertNotEqual(t1, t2) def test_get_transport_optional(self): self.CONF.set_override('rpc_backend', '') self.assertIsNone(messaging.get_transport(self.CONF, optional=True, cache=False)) aodh-2.0.6/aodh/tests/unit/__init__.py0000664000567000056710000000000013076064371020735 0ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/unit/evaluator/0000775000567000056710000000000013076064720020636 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/unit/evaluator/test_gnocchi.py0000664000567000056710000005315613076064372023676 0ustar jenkinsjenkins00000000000000# # Copyright 2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import unittest import uuid from gnocchiclient import exceptions import mock from oslo_utils import timeutils from oslotest import mockpatch import pytz import six from six import moves from aodh.evaluator import gnocchi from aodh.storage import models from aodh.tests import constants from aodh.tests.unit.evaluator import base class TestGnocchiEvaluatorBase(base.TestEvaluatorBase): def setUp(self): self.client = self.useFixture(mockpatch.Patch( 'aodh.evaluator.gnocchi.client' )).mock.Client.return_value self.prepared_alarms = [ models.Alarm(name='instance_running_hot', description='instance_running_hot', type='gnocchi_resources_threshold', enabled=True, user_id='foobar', project_id='snafu', alarm_id=str(uuid.uuid4()), state='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, time_constraints=[], rule=dict( comparison_operator='gt', threshold=80.0, evaluation_periods=5, aggregation_method='mean', granularity=60, metric='cpu_util', resource_type='instance', resource_id='my_instance') ), models.Alarm(name='group_running_idle', description='group_running_idle', type='gnocchi_aggregation_by_metrics_threshold', enabled=True, user_id='foobar', project_id='snafu', state='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, alarm_id=str(uuid.uuid4()), time_constraints=[], rule=dict( comparison_operator='le', threshold=10.0, evaluation_periods=4, aggregation_method='max', granularity=300, metrics=['0bb1604d-1193-4c0a-b4b8-74b170e35e83', '9ddc209f-42f8-41e1-b8f1-8804f59c4053']), ), models.Alarm(name='instance_not_running', description='instance_running_hot', type='gnocchi_aggregation_by_resources_threshold', enabled=True, user_id='foobar', project_id='snafu', alarm_id=str(uuid.uuid4()), state='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, time_constraints=[], rule=dict( comparison_operator='gt', threshold=80.0, evaluation_periods=6, aggregation_method='mean', granularity=50, metric='cpu_util', resource_type='instance', query='{"=": {"server_group": ' '"my_autoscaling_group"}}') ), ] super(TestGnocchiEvaluatorBase, self).setUp() @staticmethod def _get_stats(granularity, values): now = timeutils.utcnow_ts() return [[six.text_type(now - len(values) * granularity), granularity, value] for value in values] @staticmethod def _reason_data(disposition, count, most_recent): return {'type': 'threshold', 'disposition': disposition, 'count': count, 'most_recent': most_recent} def _set_all_rules(self, field, value): for alarm in self.alarms: alarm.rule[field] = value def _test_retry_transient(self): self._evaluate_all_alarms() self._assert_all_alarms('insufficient data') self._evaluate_all_alarms() self._assert_all_alarms('ok') def _test_simple_insufficient(self): self._set_all_alarms('ok') self._evaluate_all_alarms() self._assert_all_alarms('insufficient data') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) expected = [mock.call( alarm, 'ok', ('%d datapoints are unknown' % alarm.rule['evaluation_periods']), self._reason_data('unknown', alarm.rule['evaluation_periods'], None)) for alarm in self.alarms] 
self.assertEqual(expected, self.notifier.notify.call_args_list) class TestGnocchiResourceThresholdEvaluate(TestGnocchiEvaluatorBase): EVALUATOR = gnocchi.GnocchiResourceThresholdEvaluator def prepare_alarms(self): self.alarms = self.prepared_alarms[0:1] def test_retry_transient_api_failure(self): means = self._get_stats(60, [self.alarms[0].rule['threshold'] - v for v in moves.xrange(5)]) self.client.metric.get_measures.side_effect = [ exceptions.ClientException(501, "error2"), means] self._test_retry_transient() def test_simple_insufficient(self): self.client.metric.get_measures.return_value = [] self._test_simple_insufficient() @mock.patch.object(timeutils, 'utcnow') def test_simple_alarm_trip(self, utcnow): utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0) self._set_all_alarms('ok') avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v for v in moves.xrange(1, 6)]) self.client.metric.get_measures.side_effect = [avgs] self._evaluate_all_alarms() start_alarm = "2015-01-26T12:51:00" end = "2015-01-26T12:57:00" self.assertEqual( [mock.call.get_measures(aggregation='mean', metric='cpu_util', resource_id='my_instance', start=start_alarm, stop=end)], self.client.metric.mock_calls) reason = ('Transition to alarm due to 5 samples outside threshold,' ' most recent: %s' % avgs[-1][2]) reason_data = self._reason_data('outside', 5, avgs[-1][2]) expected = mock.call(self.alarms[0], 'ok', reason, reason_data) self.assertEqual(expected, self.notifier.notify.call_args) def test_simple_alarm_clear(self): self._set_all_alarms('alarm') avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] - v for v in moves.xrange(5)]) self.client.metric.get_measures.side_effect = [avgs] self._evaluate_all_alarms() self._assert_all_alarms('ok') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reason = ('Transition to ok due to 5 samples inside' ' threshold, most recent: %s' % avgs[-1][2]) reason_data = self._reason_data('inside', 5, avgs[-1][2]) expected = mock.call(self.alarms[0], 'alarm', reason, reason_data) self.assertEqual(expected, self.notifier.notify.call_args) def test_equivocal_from_known_state_ok(self): self._set_all_alarms('ok') avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v for v in moves.xrange(5)]) self.client.metric.get_measures.side_effect = [avgs] self._evaluate_all_alarms() self._assert_all_alarms('ok') self.assertEqual([], self.storage_conn.update_alarm.call_args_list) self.assertEqual([], self.notifier.notify.call_args_list) def test_state_change_and_repeat_actions(self): self._set_all_alarms('ok') self.alarms[0].repeat_actions = True avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v for v in moves.xrange(1, 6)]) self.client.metric.get_measures.side_effect = [avgs] self._evaluate_all_alarms() self._assert_all_alarms('alarm') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reason = ('Transition to alarm due to 5 samples outside ' 'threshold, most recent: %s' % avgs[-1][2]) reason_data = self._reason_data('outside', 5, avgs[-1][2]) expected = mock.call(self.alarms[0], 'ok', reason, reason_data) self.assertEqual(expected, self.notifier.notify.call_args) def test_equivocal_from_unknown(self): self._set_all_alarms('insufficient data') avgs = self._get_stats(60, [self.alarms[0].rule['threshold'] + v for v in moves.xrange(1, 6)]) 
self.client.metric.get_measures.side_effect = [avgs] self._evaluate_all_alarms() self._assert_all_alarms('alarm') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reason = ('Transition to alarm due to 5 samples outside' ' threshold, most recent: %s' % avgs[-1][2]) reason_data = self._reason_data('outside', 5, avgs[-1][2]) expected = mock.call(self.alarms[0], 'insufficient data', reason, reason_data) self.assertEqual(expected, self.notifier.notify.call_args) @unittest.skipIf(six.PY3, "the aodh base class is not python 3 ready") @mock.patch.object(timeutils, 'utcnow') def test_no_state_change_outside_time_constraint(self, mock_utcnow): self._set_all_alarms('ok') self.alarms[0].time_constraints = [ {'name': 'test', 'description': 'test', 'start': '0 11 * * *', # daily at 11:00 'duration': 10800, # 3 hours 'timezone': 'Europe/Ljubljana'} ] dt = datetime.datetime(2014, 1, 1, 15, 0, 0, tzinfo=pytz.timezone('Europe/Ljubljana')) mock_utcnow.return_value = dt.astimezone(pytz.UTC) self.client.metric.get_measures.return_value = [] self._evaluate_all_alarms() self._assert_all_alarms('ok') update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual([], update_calls, "Alarm should not change state if the current " " time is outside its time constraint.") self.assertEqual([], self.notifier.notify.call_args_list) class TestGnocchiAggregationMetricsThresholdEvaluate(TestGnocchiEvaluatorBase): EVALUATOR = gnocchi.GnocchiAggregationMetricsThresholdEvaluator def prepare_alarms(self): self.alarms = self.prepared_alarms[1:2] def test_retry_transient_api_failure(self): maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] + v for v in moves.xrange(4)]) self.client.metric.aggregation.side_effect = [Exception('boom'), maxs] self._test_retry_transient() def test_simple_insufficient(self): self.client.metric.aggregation.return_value = [] self._test_simple_insufficient() @mock.patch.object(timeutils, 'utcnow') def test_simple_alarm_trip(self, utcnow): utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0) self._set_all_alarms('ok') maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v for v in moves.xrange(4)]) self.client.metric.aggregation.side_effect = [maxs] self._evaluate_all_alarms() start_alarm = "2015-01-26T12:32:00" end = "2015-01-26T12:57:00" self.assertEqual( [mock.call.aggregation(aggregation='max', metrics=[ '0bb1604d-1193-4c0a-b4b8-74b170e35e83', '9ddc209f-42f8-41e1-b8f1-8804f59c4053'], needed_overlap=0, start=start_alarm, stop=end)], self.client.metric.mock_calls) self._assert_all_alarms('alarm') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reason = ('Transition to alarm due to 4 samples outside ' 'threshold, most recent: %s' % maxs[-1][2]) reason_data = self._reason_data('outside', 4, maxs[-1][2]) expected = mock.call(self.alarms[0], 'ok', reason, reason_data) self.assertEqual(expected, self.notifier.notify.call_args) def test_simple_alarm_clear(self): self._set_all_alarms('alarm') maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] + v for v in moves.xrange(1, 5)]) self.client.metric.aggregation.side_effect = [maxs] self._evaluate_all_alarms() self._assert_all_alarms('ok') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, 
update_calls) reason = ('Transition to ok due to 4 samples inside ' 'threshold, most recent: %s' % maxs[-1][2]) reason_data = self._reason_data('inside', 4, maxs[-1][2]) expected = mock.call(self.alarms[0], 'alarm', reason, reason_data) self.assertEqual(expected, self.notifier.notify.call_args) def test_equivocal_from_known_state_ok(self): self._set_all_alarms('ok') maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v for v in moves.xrange(-1, 3)]) self.client.metric.aggregation.side_effect = [maxs] self._evaluate_all_alarms() self._assert_all_alarms('ok') self.assertEqual( [], self.storage_conn.update_alarm.call_args_list) self.assertEqual([], self.notifier.notify.call_args_list) def test_equivocal_ok_to_alarm(self): self._set_all_alarms('ok') # NOTE(sileht): we add one useless point (81.0) that will break # the test if the evaluator doesn't remove it. maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v for v in moves.xrange(-1, 5)]) self.client.metric.aggregation.side_effect = [maxs] self._evaluate_all_alarms() self._assert_all_alarms('alarm') def test_equivocal_from_known_state_and_repeat_actions(self): self._set_all_alarms('ok') self.alarms[0].repeat_actions = True maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v for v in moves.xrange(-1, 3)]) self.client.metric.aggregation.side_effect = [maxs] self._evaluate_all_alarms() self._assert_all_alarms('ok') self.assertEqual([], self.storage_conn.update_alarm.call_args_list) reason = ('Remaining as ok due to 1 samples inside' ' threshold, most recent: 8.0') reason_datas = self._reason_data('inside', 1, 8.0) expected = [mock.call(self.alarms[0], 'ok', reason, reason_datas)] self.assertEqual(expected, self.notifier.notify.call_args_list) def test_unequivocal_from_known_state_and_repeat_actions(self): self._set_all_alarms('alarm') self.alarms[0].repeat_actions = True maxs = self._get_stats(300, [self.alarms[0].rule['threshold'] - v for v in moves.xrange(4)]) self.client.metric.aggregation.side_effect = [maxs] self._evaluate_all_alarms() self._assert_all_alarms('alarm') self.assertEqual([], self.storage_conn.update_alarm.call_args_list) reason = ('Remaining as alarm due to 4 samples outside' ' threshold, most recent: 7.0') reason_datas = self._reason_data('outside', 4, 7.0) expected = [mock.call(self.alarms[0], 'alarm', reason, reason_datas)] self.assertEqual(expected, self.notifier.notify.call_args_list) class TestGnocchiAggregationResourcesThresholdEvaluate( TestGnocchiEvaluatorBase): EVALUATOR = gnocchi.GnocchiAggregationResourcesThresholdEvaluator def prepare_alarms(self): self.alarms = self.prepared_alarms[2:3] def test_retry_transient_api_failure(self): avgs2 = self._get_stats(50, [self.alarms[0].rule['threshold'] - v for v in moves.xrange(6)]) self.client.metric.aggregation.side_effect = [ exceptions.ClientException(500, "error"), avgs2] self._test_retry_transient() def test_simple_insufficient(self): self.client.metric.aggregation.return_value = [] self._test_simple_insufficient() @mock.patch.object(timeutils, 'utcnow') def test_simple_alarm_trip(self, utcnow): utcnow.return_value = datetime.datetime(2015, 1, 26, 12, 57, 0, 0) self._set_all_alarms('ok') avgs = self._get_stats(50, [self.alarms[0].rule['threshold'] + v for v in moves.xrange(1, 7)]) self.client.metric.aggregation.side_effect = [avgs] self._evaluate_all_alarms() start_alarm = "2015-01-26T12:51:10" end = "2015-01-26T12:57:00" self.assertEqual( [mock.call.aggregation(aggregation='mean', metrics='cpu_util', needed_overlap=0, 
query={"=": {"server_group": "my_autoscaling_group"}}, resource_type='instance', start=start_alarm, stop=end)], self.client.metric.mock_calls) self._assert_all_alarms('alarm') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reason = ('Transition to alarm due to 6 samples outside ' 'threshold, most recent: %s' % avgs[-1][2]) reason_data = self._reason_data('outside', 6, avgs[-1][2]) expected = mock.call(self.alarms[0], 'ok', reason, reason_data) self.assertEqual(expected, self.notifier.notify.call_args) def test_simple_alarm_clear(self): self._set_all_alarms('alarm') avgs = self._get_stats(50, [self.alarms[0].rule['threshold'] - v for v in moves.xrange(6)]) self.client.metric.aggregation.side_effect = [avgs] self._evaluate_all_alarms() self._assert_all_alarms('ok') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reason = ('Transition to ok due to 6 samples inside ' 'threshold, most recent: %s' % avgs[-1][2]) reason_data = self._reason_data('inside', 6, avgs[-1][2]) expected = mock.call(self.alarms[0], 'alarm', reason, reason_data) self.assertEqual(expected, self.notifier.notify.call_args) def test_equivocal_from_known_state_ok(self): self._set_all_alarms('ok') avgs = self._get_stats(50, [self.alarms[0].rule['threshold'] + v for v in moves.xrange(6)]) self.client.metric.aggregation.side_effect = [avgs] self._evaluate_all_alarms() self._assert_all_alarms('ok') self.assertEqual( [], self.storage_conn.update_alarm.call_args_list) self.assertEqual([], self.notifier.notify.call_args_list) aodh-2.0.6/aodh/tests/unit/evaluator/test_event.py0000664000567000056710000004375313076064372023407 0ustar jenkinsjenkins00000000000000# # Copyright 2015 NEC Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import datetime import six import uuid import mock from oslo_utils import timeutils from aodh import evaluator from aodh.evaluator import event as event_evaluator from aodh.storage import models from aodh.tests import constants from aodh.tests.unit.evaluator import base class TestEventAlarmEvaluate(base.TestEvaluatorBase): EVALUATOR = event_evaluator.EventAlarmEvaluator @staticmethod def _alarm(**kwargs): alarm_id = kwargs.get('id') or str(uuid.uuid4()) return models.Alarm(name=kwargs.get('name', alarm_id), type='event', enabled=True, alarm_id=alarm_id, description='desc', state=kwargs.get('state', 'insufficient data'), severity='critical', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=kwargs.get('repeat', False), user_id='user', project_id=kwargs.get('project', ''), time_constraints=[], rule=dict(event_type=kwargs.get('event_type', '*'), query=kwargs.get('query', []))) @staticmethod def _event(**kwargs): return {'message_id': kwargs.get('id') or str(uuid.uuid4()), 'event_type': kwargs.get('event_type', 'type0'), 'traits': kwargs.get('traits', [])} def _setup_alarm_storage(self, alarms): self._stored_alarms = {a.alarm_id: copy.deepcopy(a) for a in alarms} self._update_history = [] def get_alarms(**kwargs): return (a for a in six.itervalues(self._stored_alarms)) def update_alarm(alarm): self._stored_alarms[alarm.alarm_id] = copy.deepcopy(alarm) self._update_history.append(dict(alarm_id=alarm.alarm_id, state=alarm.state)) self.storage_conn.get_alarms.side_effect = get_alarms self.storage_conn.update_alarm.side_effect = update_alarm def _setup_alarm_notifier(self): self._notification_history = [] def notify(alarm, previous, reason, data): self._notification_history.append(dict(alarm_id=alarm.alarm_id, state=alarm.state, previous=previous, reason=reason, data=data)) self.notifier.notify.side_effect = notify def _do_test_event_alarm(self, alarms, events, expect_db_queries=None, expect_alarm_states=None, expect_alarm_updates=None, expect_notifications=None): self._setup_alarm_storage(alarms) self._setup_alarm_notifier() self.evaluator.evaluate_events(events) if expect_db_queries is not None: expected = [mock.call(enabled=True, alarm_type='event', project=p) for p in expect_db_queries] self.assertEqual(expected, self.storage_conn.get_alarms.call_args_list) if expect_alarm_states is not None: for alarm_id, state in six.iteritems(expect_alarm_states): self.assertEqual(state, self._stored_alarms[alarm_id].state) if expect_alarm_updates is not None: self.assertEqual(len(expect_alarm_updates), len(self._update_history)) for alarm, h in zip(expect_alarm_updates, self._update_history): expected = dict(alarm_id=alarm.alarm_id, state=evaluator.ALARM) self.assertEqual(expected, h) if expect_notifications is not None: self.assertEqual(len(expect_notifications), len(self._notification_history)) for n, h in zip(expect_notifications, self._notification_history): alarm = n['alarm'] event = n['event'] previous = n.get('previous', evaluator.UNKNOWN) reason = ('Event (message_id=%(e)s) hit the query of alarm ' '(id=%(a)s)' % {'e': event['message_id'], 'a': alarm.alarm_id}) data = {'type': 'event', 'event': event} expected = dict(alarm_id=alarm.alarm_id, state=evaluator.ALARM, previous=previous, reason=reason, data=data) self.assertEqual(expected, h) def test_fire_alarm_in_the_same_project_id(self): alarm = self._alarm(project='project1') event = self._event(traits=[['project_id', 1, 'project1']]) 
self._do_test_event_alarm( [alarm], [event], expect_db_queries=['project1'], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarm], expect_notifications=[dict(alarm=alarm, event=event)]) def test_fire_alarm_in_the_same_tenant_id(self): alarm = self._alarm(project='project1') event = self._event(traits=[['tenant_id', 1, 'project1']]) self._do_test_event_alarm( [alarm], [event], expect_db_queries=['project1'], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarm], expect_notifications=[dict(alarm=alarm, event=event)]) def test_fire_alarm_in_project_none(self): alarm = self._alarm(project='') event = self._event() self._do_test_event_alarm( [alarm], [event], expect_db_queries=[''], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarm], expect_notifications=[dict(alarm=alarm, event=event)]) def test_continue_following_evaluation_after_exception(self): alarms = [ self._alarm(id=1), self._alarm(id=2), ] event = self._event() original = self.evaluator._fire_alarm with mock.patch.object(event_evaluator.EventAlarmEvaluator, '_fire_alarm') as _fire_alarm: def _side_effect(*args, **kwargs): _fire_alarm.side_effect = original return Exception('boom') _fire_alarm.side_effect = _side_effect self._do_test_event_alarm( alarms, [event], expect_alarm_states={alarms[0].alarm_id: evaluator.UNKNOWN, alarms[1].alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarms[1]], expect_notifications=[dict(alarm=alarms[1], event=event)]) def test_skip_event_missing_event_type(self): alarm = self._alarm() event = {'message_id': str(uuid.uuid4()), 'traits': []} self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, expect_alarm_updates=[], expect_notifications=[]) def test_skip_event_missing_message_id(self): alarm = self._alarm() event = {'event_type': 'type1', 'traits': []} self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, expect_alarm_updates=[], expect_notifications=[]) def test_continue_alarming_when_repeat_actions_enabled(self): alarm = self._alarm(repeat=True, state=evaluator.ALARM) event = self._event() self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarm], expect_notifications=[dict(alarm=alarm, event=event, previous=evaluator.ALARM)]) def test_do_not_continue_alarming_when_repeat_actions_disabled(self): alarm = self._alarm(repeat=False, state=evaluator.ALARM) event = self._event() self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[], expect_notifications=[]) def test_skip_uninterested_event_type(self): alarm = self._alarm(event_type='compute.instance.exists') event = self._event(event_type='compute.instance.update') self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, expect_alarm_updates=[], expect_notifications=[]) def test_fire_alarm_event_type_pattern_matched(self): alarm = self._alarm(event_type='compute.instance.*') event = self._event(event_type='compute.instance.update') self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarm], expect_notifications=[dict(alarm=alarm, event=event)]) def test_skip_event_type_pattern_unmatched(self): alarm = self._alarm(event_type='compute.instance.*') event = self._event(event_type='dummy.compute.instance') 
self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, expect_alarm_updates=[], expect_notifications=[]) def test_fire_alarm_query_matched_string(self): alarm = self._alarm(query=[dict(field="traits.state", value="stopped", op="eq")]) event = self._event(traits=[['state', 1, 'stopped']]) self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarm], expect_notifications=[dict(alarm=alarm, event=event)]) def test_skip_query_unmatched_string(self): alarm = self._alarm(query=[dict(field="traits.state", value="stopped", op="eq")]) event = self._event(traits=[['state', 1, 'active']]) self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, expect_alarm_updates=[], expect_notifications=[]) def test_fire_alarm_query_matched_integer(self): alarm = self._alarm(query=[dict(field="traits.instance_type_id", type="integer", value="5", op="eq")]) event = self._event(traits=[['instance_type_id', 2, 5]]) self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarm], expect_notifications=[dict(alarm=alarm, event=event)]) def test_skip_query_unmatched_integer(self): alarm = self._alarm(query=[dict(field="traits.instance_type_id", type="integer", value="5", op="eq")]) event = self._event(traits=[['instance_type_id', 2, 6]]) self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, expect_alarm_updates=[], expect_notifications=[]) def test_fire_alarm_query_matched_float(self): alarm = self._alarm(query=[dict(field="traits.io_read_kbs", type="float", value="123.456", op="eq")]) event = self._event(traits=[['io_read_kbs', 3, 123.456]]) self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarm], expect_notifications=[dict(alarm=alarm, event=event)]) def test_skip_query_unmatched_float(self): alarm = self._alarm(query=[dict(field="traits.io_read_kbs", type="float", value="123.456", op="eq")]) event = self._event(traits=[['io_read_kbs', 3, 456.123]]) self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, expect_alarm_updates=[], expect_notifications=[]) def test_fire_alarm_query_matched_datetime(self): alarm = self._alarm(query=[dict(field="traits.created_at", type="datetime", value="2015-09-01T18:52:27.214309", op="eq")]) event = self._event(traits=[['created_at', 4, '2015-09-01T18:52:27.214309']]) self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarm], expect_notifications=[dict(alarm=alarm, event=event)]) def test_skip_query_unmatched_datetime(self): alarm = self._alarm(query=[dict(field="traits.created_at", type="datetime", value="2015-09-01T18:52:27.214309", op="eq")]) event = self._event(traits=[['created_at', 4, '2015-09-02T18:52:27.214309']]) self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, expect_alarm_updates=[], expect_notifications=[]) def test_skip_alarm_due_to_uncompareable_trait(self): alarm = self._alarm(query=[dict(field="traits.created_at", type="datetime", value="2015-09-01T18:52:27.214309", op="eq")]) event = self._event(traits=[['created_at', 3, 123.456]]) self._do_test_event_alarm( [alarm], [event], expect_alarm_states={alarm.alarm_id: evaluator.UNKNOWN}, expect_alarm_updates=[], 
expect_notifications=[]) def test_event_alarm_cache_hit(self): alarm = self._alarm(project='project2', event_type='none') events = [ self._event(traits=[['project_id', 1, 'project2']]), self._event(traits=[['project_id', 1, 'project2']]), ] self._do_test_event_alarm([alarm], events, expect_db_queries=['project2']) def test_event_alarm_cache_updated_after_fired(self): alarm = self._alarm(project='project2', event_type='type1', repeat=False) events = [ self._event(event_type='type1', traits=[['project_id', 1, 'project2']]), self._event(event_type='type1', traits=[['project_id', 1, 'project2']]), ] self._do_test_event_alarm( [alarm], events, expect_db_queries=['project2'], expect_alarm_states={alarm.alarm_id: evaluator.ALARM}, expect_alarm_updates=[alarm], expect_notifications=[dict(alarm=alarm, event=events[0])]) def test_event_alarm_caching_disabled(self): alarm = self._alarm(project='project2', event_type='none') events = [ self._event(traits=[['project_id', 1, 'project2']]), self._event(traits=[['project_id', 1, 'project2']]), ] self.evaluator.conf.event_alarm_cache_ttl = 0 self._do_test_event_alarm([alarm], events, expect_db_queries=['project2', 'project2']) @mock.patch.object(timeutils, 'utcnow') def test_event_alarm_cache_expired(self, mock_utcnow): alarm = self._alarm(project='project2', event_type='none') events = [ self._event(traits=[['project_id', 1, 'project2']]), self._event(traits=[['project_id', 1, 'project2']]), ] mock_utcnow.side_effect = [ datetime.datetime(2015, 1, 1, 0, 0, 0), datetime.datetime(2015, 1, 1, 1, 0, 0), datetime.datetime(2015, 1, 1, 1, 1, 0), ] self._do_test_event_alarm([alarm], events, expect_db_queries=['project2', 'project2']) def test_event_alarm_cache_miss(self): events = [ self._event(traits=[['project_id', 1, 'project2']]), self._event(traits=[['project_id', 1, 'project3']]), ] self._do_test_event_alarm([], events, expect_db_queries=['project2', 'project3']) aodh-2.0.6/aodh/tests/unit/evaluator/test_base.py0000664000567000056710000001436313076064372023173 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import mock from oslo_utils import timeutils from oslotest import base from aodh import evaluator from aodh import queue class TestEvaluatorBaseClass(base.BaseTestCase): def setUp(self): super(TestEvaluatorBaseClass, self).setUp() self.called = False def _notify(self, alarm, previous, reason, details): self.called = True raise Exception('Boom!') @mock.patch.object(queue, 'AlarmNotifier') def test_base_refresh(self, notifier): notifier.notify = self._notify class EvaluatorSub(evaluator.Evaluator): def evaluate(self, alarm): pass ev = EvaluatorSub(mock.MagicMock()) ev.notifier = notifier ev.storage_conn = mock.MagicMock() ev._record_change = mock.MagicMock() ev._refresh(mock.MagicMock(), mock.MagicMock(), mock.MagicMock(), mock.MagicMock()) ev.storage_conn.update_alarm.assert_called_once_with(mock.ANY) ev._record_change.assert_called_once_with(mock.ANY) self.assertTrue(self.called) @mock.patch.object(timeutils, 'utcnow') def test_base_time_constraints(self, mock_utcnow): alarm = mock.MagicMock() alarm.time_constraints = [ {'name': 'test', 'description': 'test', 'start': '0 11 * * *', # daily at 11:00 'duration': 10800, # 3 hours 'timezone': ''}, {'name': 'test2', 'description': 'test', 'start': '0 23 * * *', # daily at 23:00 'duration': 10800, # 3 hours 'timezone': ''}, ] cls = evaluator.Evaluator mock_utcnow.return_value = datetime.datetime(2014, 1, 1, 12, 0, 0) self.assertTrue(cls.within_time_constraint(alarm)) mock_utcnow.return_value = datetime.datetime(2014, 1, 2, 1, 0, 0) self.assertTrue(cls.within_time_constraint(alarm)) mock_utcnow.return_value = datetime.datetime(2014, 1, 2, 5, 0, 0) self.assertFalse(cls.within_time_constraint(alarm)) @mock.patch.object(timeutils, 'utcnow') def test_base_time_constraints_by_month(self, mock_utcnow): alarm = mock.MagicMock() alarm.time_constraints = [ {'name': 'test', 'description': 'test', 'start': '0 11 31 1,3,5,7,8,10,12 *', # every 31st at 11:00 'duration': 10800, # 3 hours 'timezone': ''}, ] cls = evaluator.Evaluator mock_utcnow.return_value = datetime.datetime(2015, 3, 31, 11, 30, 0) self.assertTrue(cls.within_time_constraint(alarm)) @mock.patch.object(timeutils, 'utcnow') def test_base_time_constraints_complex(self, mock_utcnow): alarm = mock.MagicMock() alarm.time_constraints = [ {'name': 'test', 'description': 'test', # Every consecutive 2 minutes (from the 3rd to the 57th) past # every consecutive 2 hours (between 3:00 and 12:59) on every day. 
'start': '3-57/2 3-12/2 * * *', 'duration': 30, 'timezone': ''} ] cls = evaluator.Evaluator # test minutes inside mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 3, 0) self.assertTrue(cls.within_time_constraint(alarm)) mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 31, 0) self.assertTrue(cls.within_time_constraint(alarm)) mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 57, 0) self.assertTrue(cls.within_time_constraint(alarm)) # test minutes outside mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 2, 0) self.assertFalse(cls.within_time_constraint(alarm)) mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 4, 0) self.assertFalse(cls.within_time_constraint(alarm)) mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 58, 0) self.assertFalse(cls.within_time_constraint(alarm)) # test hours inside mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 3, 31, 0) self.assertTrue(cls.within_time_constraint(alarm)) mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 5, 31, 0) self.assertTrue(cls.within_time_constraint(alarm)) mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 11, 31, 0) self.assertTrue(cls.within_time_constraint(alarm)) # test hours outside mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 1, 31, 0) self.assertFalse(cls.within_time_constraint(alarm)) mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 4, 31, 0) self.assertFalse(cls.within_time_constraint(alarm)) mock_utcnow.return_value = datetime.datetime(2014, 1, 5, 12, 31, 0) self.assertFalse(cls.within_time_constraint(alarm)) @mock.patch.object(timeutils, 'utcnow') def test_base_time_constraints_timezone(self, mock_utcnow): alarm = mock.MagicMock() cls = evaluator.Evaluator mock_utcnow.return_value = datetime.datetime(2014, 1, 1, 11, 0, 0) alarm.time_constraints = [ {'name': 'test', 'description': 'test', 'start': '0 11 * * *', # daily at 11:00 'duration': 10800, # 3 hours 'timezone': 'Europe/Ljubljana'} ] self.assertTrue(cls.within_time_constraint(alarm)) alarm.time_constraints = [ {'name': 'test2', 'description': 'test2', 'start': '0 11 * * *', # daily at 11:00 'duration': 10800, # 3 hours 'timezone': 'US/Eastern'} ] self.assertFalse(cls.within_time_constraint(alarm)) aodh-2.0.6/aodh/tests/unit/evaluator/test_threshold.py0000664000567000056710000006457313076064372024265 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for aodh/evaluator/threshold.py """ import copy import datetime import json import uuid from ceilometerclient import exc from ceilometerclient.v2 import statistics import mock from oslo_utils import timeutils import pytz from six import moves from aodh.evaluator import threshold from aodh import messaging from aodh.storage import models from aodh.tests import constants from aodh.tests.unit.evaluator import base class TestEvaluate(base.TestEvaluatorBase): EVALUATOR = threshold.ThresholdEvaluator def prepare_alarms(self): self.alarms = [ models.Alarm(name='instance_running_hot', description='instance_running_hot', type='threshold', enabled=True, user_id='foobar', project_id='snafu', alarm_id=str(uuid.uuid4()), state='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, time_constraints=[], rule=dict( comparison_operator='gt', threshold=80.0, evaluation_periods=5, statistic='avg', period=60, meter_name='cpu_util', query=[{'field': 'meter', 'op': 'eq', 'value': 'cpu_util'}, {'field': 'resource_id', 'op': 'eq', 'value': 'my_instance'}]), severity='critical' ), models.Alarm(name='group_running_idle', description='group_running_idle', type='threshold', enabled=True, user_id='foobar', project_id='snafu', state='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, alarm_id=str(uuid.uuid4()), time_constraints=[], rule=dict( comparison_operator='le', threshold=10.0, evaluation_periods=4, statistic='max', period=300, meter_name='cpu_util', query=[{'field': 'meter', 'op': 'eq', 'value': 'cpu_util'}, {'field': 'metadata.user_metadata.AS', 'op': 'eq', 'value': 'my_group'}]), severity='critical' ), ] @staticmethod def _get_stat(attr, value, count=1): return statistics.Statistics(None, {attr: value, 'count': count}) @staticmethod def _reason_data(disposition, count, most_recent): return {'type': 'threshold', 'disposition': disposition, 'count': count, 'most_recent': most_recent} def _set_all_rules(self, field, value): for alarm in self.alarms: alarm.rule[field] = value def test_retry_transient_api_failure(self): broken = exc.CommunicationError(message='broken') avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] - v) for v in moves.xrange(5)] maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] + v) for v in moves.xrange(1, 5)] self.api_client.statistics.list.side_effect = [broken, broken, avgs, maxs] self._evaluate_all_alarms() self._assert_all_alarms('insufficient data') self._evaluate_all_alarms() self._assert_all_alarms('ok') def test_simple_insufficient(self): self._set_all_alarms('ok') self.api_client.statistics.list.return_value = [] self._evaluate_all_alarms() self._assert_all_alarms('insufficient data') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) expected = [mock.call( alarm, 'ok', ('%d datapoints are unknown' % alarm.rule['evaluation_periods']), self._reason_data('unknown', alarm.rule['evaluation_periods'], None)) for alarm in self.alarms] self.assertEqual(expected, self.notifier.notify.call_args_list) def test_less_insufficient_data(self): self._set_all_alarms('ok') avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] - v) for v in moves.xrange(4)] maxs = [self._get_stat('max', 
self.alarms[1].rule['threshold'] - v) for v in moves.xrange(1, 4)] self.api_client.statistics.list.side_effect = [avgs, maxs] self._evaluate_all_alarms() self._assert_all_alarms('insufficient data') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(update_calls, expected) expected = [mock.call( alarm, 'ok', ('%d datapoints are unknown' % alarm.rule['evaluation_periods']), self._reason_data('unknown', alarm.rule['evaluation_periods'], alarm.rule['threshold'] - 3)) for alarm in self.alarms] self.assertEqual(expected, self.notifier.notify.call_args_list) def test_simple_alarm_trip(self): self._set_all_alarms('ok') avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) for v in moves.xrange(1, 6)] maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) for v in moves.xrange(4)] self.api_client.statistics.list.side_effect = [avgs, maxs] self._evaluate_all_alarms() self._assert_all_alarms('alarm') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reasons = ['Transition to alarm due to 5 samples outside' ' threshold, most recent: %s' % avgs[-1].avg, 'Transition to alarm due to 4 samples outside' ' threshold, most recent: %s' % maxs[-1].max] reason_datas = [self._reason_data('outside', 5, avgs[-1].avg), self._reason_data('outside', 4, maxs[-1].max)] expected = [mock.call(alarm, 'ok', reason, reason_data) for alarm, reason, reason_data in zip(self.alarms, reasons, reason_datas)] self.assertEqual(expected, self.notifier.notify.call_args_list) def test_simple_alarm_clear(self): self._set_all_alarms('alarm') avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] - v) for v in moves.xrange(5)] maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] + v) for v in moves.xrange(1, 5)] self.api_client.statistics.list.side_effect = [avgs, maxs] self._evaluate_all_alarms() self._assert_all_alarms('ok') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reasons = ['Transition to ok due to 5 samples inside' ' threshold, most recent: %s' % avgs[-1].avg, 'Transition to ok due to 4 samples inside' ' threshold, most recent: %s' % maxs[-1].max] reason_datas = [self._reason_data('inside', 5, avgs[-1].avg), self._reason_data('inside', 4, maxs[-1].max)] expected = [mock.call(alarm, 'alarm', reason, reason_data) for alarm, reason, reason_data in zip(self.alarms, reasons, reason_datas)] self.assertEqual(expected, self.notifier.notify.call_args_list) def _construct_payloads(self): payloads = [] for alarm in self.alarms: type = models.AlarmChange.STATE_TRANSITION detail = json.dumps({'state': alarm.state}) on_behalf_of = alarm.project_id payload = dict( event_id='fake_event_id_%s' % self.alarms.index(alarm), alarm_id=alarm.alarm_id, type=type, detail=detail, user_id='fake_user_id', project_id='fake_project_id', on_behalf_of=on_behalf_of, timestamp=datetime.datetime(2015, 7, 26, 3, 33, 21, 876795)) payloads.append(payload) return payloads @mock.patch.object(uuid, 'uuid4') @mock.patch.object(timeutils, 'utcnow') @mock.patch.object(messaging, 'get_notifier') def test_alarm_change_record(self, get_notifier, utcnow, mock_uuid): # the context.RequestContext() method need to generate uuid, # so we need to provide 'fake_uuid_0' and 'fake_uuid_1' for that. 
mock_uuid.side_effect = ['fake_event_id_0', 'fake_event_id_1'] change_notifier = mock.MagicMock() get_notifier.return_value = change_notifier utcnow.return_value = datetime.datetime(2015, 7, 26, 3, 33, 21, 876795) self._set_all_alarms('ok') with mock.patch('ceilometerclient.client.get_client', return_value=self.api_client): avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) for v in moves.xrange(1, 6)] maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) for v in moves.xrange(4)] self.api_client.statistics.list.side_effect = [avgs, maxs] self._evaluate_all_alarms() self._assert_all_alarms('alarm') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) payloads = self._construct_payloads() expected_payloads = [mock.call(p) for p in payloads] change_records = \ self.storage_conn.record_alarm_change.call_args_list self.assertEqual(expected_payloads, change_records) notify_calls = change_notifier.info.call_args_list notification = "alarm.state_transition" expected_payloads = [mock.call(mock.ANY, notification, p) for p in payloads] self.assertEqual(expected_payloads, notify_calls) def test_equivocal_from_known_state(self): self._set_all_alarms('ok') avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) for v in moves.xrange(5)] maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) for v in moves.xrange(-1, 3)] self.api_client.statistics.list.side_effect = [avgs, maxs] self._evaluate_all_alarms() self._assert_all_alarms('ok') self.assertEqual( [], self.storage_conn.update_alarm.call_args_list) self.assertEqual([], self.notifier.notify.call_args_list) def test_equivocal_from_known_state_and_repeat_actions(self): self._set_all_alarms('ok') self.alarms[1].repeat_actions = True avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) for v in moves.xrange(5)] maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) for v in moves.xrange(-1, 3)] self.api_client.statistics.list.side_effect = [avgs, maxs] self._evaluate_all_alarms() self._assert_all_alarms('ok') self.assertEqual([], self.storage_conn.update_alarm.call_args_list) reason = ('Remaining as ok due to 1 samples inside' ' threshold, most recent: 8.0') reason_datas = self._reason_data('inside', 1, 8.0) expected = [mock.call(self.alarms[1], 'ok', reason, reason_datas)] self.assertEqual(expected, self.notifier.notify.call_args_list) def test_unequivocal_from_known_state_and_repeat_actions(self): self._set_all_alarms('alarm') self.alarms[1].repeat_actions = True avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) for v in moves.xrange(1, 6)] maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) for v in moves.xrange(4)] self.api_client.statistics.list.side_effect = [avgs, maxs] self._evaluate_all_alarms() self._assert_all_alarms('alarm') self.assertEqual([], self.storage_conn.update_alarm.call_args_list) reason = ('Remaining as alarm due to 4 samples outside' ' threshold, most recent: 7.0') reason_datas = self._reason_data('outside', 4, 7.0) expected = [mock.call(self.alarms[1], 'alarm', reason, reason_datas)] self.assertEqual(expected, self.notifier.notify.call_args_list) def test_state_change_and_repeat_actions(self): self._set_all_alarms('ok') self.alarms[0].repeat_actions = True self.alarms[1].repeat_actions = True avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) for v in moves.xrange(1, 6)] maxs = 
[self._get_stat('max', self.alarms[1].rule['threshold'] - v) for v in moves.xrange(4)] self.api_client.statistics.list.side_effect = [avgs, maxs] self._evaluate_all_alarms() self._assert_all_alarms('alarm') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reasons = ['Transition to alarm due to 5 samples outside' ' threshold, most recent: %s' % avgs[-1].avg, 'Transition to alarm due to 4 samples outside' ' threshold, most recent: %s' % maxs[-1].max] reason_datas = [self._reason_data('outside', 5, avgs[-1].avg), self._reason_data('outside', 4, maxs[-1].max)] expected = [mock.call(alarm, 'ok', reason, reason_data) for alarm, reason, reason_data in zip(self.alarms, reasons, reason_datas)] self.assertEqual(expected, self.notifier.notify.call_args_list) def test_evaluation_keep_alarm_attributes_constant(self): self._set_all_alarms('ok') original_alarms = copy.deepcopy(self.alarms) with mock.patch('ceilometerclient.client.get_client', return_value=self.api_client): avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) for v in moves.xrange(1, 6)] maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) for v in moves.xrange(4)] self.api_client.statistics.list.side_effect = [avgs, maxs] self._evaluate_all_alarms() self._assert_all_alarms('alarm') primitive_alarms = [a.as_dict() for a in self.alarms] for alarm in original_alarms: alarm.state = 'alarm' primitive_original_alarms = [a.as_dict() for a in original_alarms] self.assertEqual(primitive_original_alarms, primitive_alarms) def test_equivocal_from_unknown(self): self._set_all_alarms('insufficient data') avgs = [self._get_stat('avg', self.alarms[0].rule['threshold'] + v) for v in moves.xrange(1, 6)] maxs = [self._get_stat('max', self.alarms[1].rule['threshold'] - v) for v in moves.xrange(-3, 1)] self.api_client.statistics.list.side_effect = [avgs, maxs] self._evaluate_all_alarms() self._assert_all_alarms('alarm') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reasons = ['Transition to alarm due to 5 samples outside' ' threshold, most recent: %s' % avgs[-1].avg, 'Transition to alarm due to 1 samples outside' ' threshold, most recent: %s' % maxs[-1].max] reason_datas = [self._reason_data('outside', 5, avgs[-1].avg), self._reason_data('outside', 1, maxs[-1].max)] expected = [mock.call(alarm, 'insufficient data', reason, reason_data) for alarm, reason, reason_data in zip(self.alarms, reasons, reason_datas)] self.assertEqual(expected, self.notifier.notify.call_args_list) def _do_test_bound_duration(self, start, exclude_outliers=None): alarm = self.alarms[0] if exclude_outliers is not None: alarm.rule['exclude_outliers'] = exclude_outliers with mock.patch.object(timeutils, 'utcnow') as mock_utcnow: mock_utcnow.return_value = datetime.datetime(2012, 7, 2, 10, 45) constraint = self.evaluator._bound_duration(alarm.rule) self.assertEqual((start, timeutils.utcnow().isoformat()), constraint) def test_bound_duration_outlier_exclusion_defaulted(self): self._do_test_bound_duration('2012-07-02T10:39:00') def test_bound_duration_outlier_exclusion_clear(self): self._do_test_bound_duration('2012-07-02T10:39:00', False) def test_bound_duration_outlier_exclusion_set(self): self._do_test_bound_duration('2012-07-02T10:35:00', True) def _do_test_simple_alarm_trip_outlier_exclusion(self, exclude_outliers): 
self._set_all_rules('exclude_outliers', exclude_outliers) self._set_all_alarms('ok') # most recent datapoints inside threshold but with # anomalously low sample count threshold = self.alarms[0].rule['threshold'] avgs = [self._get_stat('avg', threshold + (v if v < 10 else -v), count=20 if v < 10 else 1) for v in moves.xrange(1, 11)] threshold = self.alarms[1].rule['threshold'] maxs = [self._get_stat('max', threshold - (v if v < 7 else -v), count=20 if v < 7 else 1) for v in moves.xrange(8)] self.api_client.statistics.list.side_effect = [avgs, maxs] self._evaluate_all_alarms() self._assert_all_alarms('alarm' if exclude_outliers else 'ok') if exclude_outliers: expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reasons = ['Transition to alarm due to 5 samples outside' ' threshold, most recent: %s' % avgs[-2].avg, 'Transition to alarm due to 4 samples outside' ' threshold, most recent: %s' % maxs[-2].max] reason_datas = [self._reason_data('outside', 5, avgs[-2].avg), self._reason_data('outside', 4, maxs[-2].max)] expected = [mock.call(alarm, 'ok', reason, reason_data) for alarm, reason, reason_data in zip(self.alarms, reasons, reason_datas)] self.assertEqual(expected, self.notifier.notify.call_args_list) def test_simple_alarm_trip_with_outlier_exclusion(self): self. _do_test_simple_alarm_trip_outlier_exclusion(True) def test_simple_alarm_no_trip_without_outlier_exclusion(self): self. _do_test_simple_alarm_trip_outlier_exclusion(False) def _do_test_simple_alarm_clear_outlier_exclusion(self, exclude_outliers): self._set_all_rules('exclude_outliers', exclude_outliers) self._set_all_alarms('alarm') # most recent datapoints outside threshold but with # anomalously low sample count threshold = self.alarms[0].rule['threshold'] avgs = [self._get_stat('avg', threshold - (v if v < 9 else -v), count=20 if v < 9 else 1) for v in moves.xrange(10)] threshold = self.alarms[1].rule['threshold'] maxs = [self._get_stat('max', threshold + (v if v < 8 else -v), count=20 if v < 8 else 1) for v in moves.xrange(1, 9)] self.api_client.statistics.list.side_effect = [avgs, maxs] self._evaluate_all_alarms() self._assert_all_alarms('ok' if exclude_outliers else 'alarm') if exclude_outliers: expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reasons = ['Transition to ok due to 5 samples inside' ' threshold, most recent: %s' % avgs[-2].avg, 'Transition to ok due to 4 samples inside' ' threshold, most recent: %s' % maxs[-2].max] reason_datas = [self._reason_data('inside', 5, avgs[-2].avg), self._reason_data('inside', 4, maxs[-2].max)] expected = [mock.call(alarm, 'alarm', reason, reason_data) for alarm, reason, reason_data in zip(self.alarms, reasons, reason_datas)] self.assertEqual(expected, self.notifier.notify.call_args_list) def test_simple_alarm_clear_with_outlier_exclusion(self): self. _do_test_simple_alarm_clear_outlier_exclusion(True) def test_simple_alarm_no_clear_without_outlier_exclusion(self): self. 
_do_test_simple_alarm_clear_outlier_exclusion(False) @mock.patch.object(timeutils, 'utcnow') def test_state_change_inside_time_constraint(self, mock_utcnow): self._set_all_alarms('ok') self.alarms[0].time_constraints = [ {'name': 'test', 'description': 'test', 'start': '0 11 * * *', # daily at 11:00 'duration': 10800, # 3 hours 'timezone': 'Europe/Ljubljana'} ] self.alarms[1].time_constraints = self.alarms[0].time_constraints dt = datetime.datetime(2014, 1, 1, 12, 0, 0, tzinfo=pytz.timezone('Europe/Ljubljana')) mock_utcnow.return_value = dt.astimezone(pytz.UTC) with mock.patch('ceilometerclient.client.get_client', return_value=self.api_client): # the following part based on test_simple_insufficient self.api_client.statistics.list.return_value = [] self._evaluate_all_alarms() self._assert_all_alarms('insufficient data') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls, "Alarm should change state if the current " "time is inside its time constraint.") expected = [mock.call( alarm, 'ok', ('%d datapoints are unknown' % alarm.rule['evaluation_periods']), self._reason_data('unknown', alarm.rule['evaluation_periods'], None)) for alarm in self.alarms] self.assertEqual(expected, self.notifier.notify.call_args_list) @mock.patch.object(timeutils, 'utcnow') def test_no_state_change_outside_time_constraint(self, mock_utcnow): self._set_all_alarms('ok') self.alarms[0].time_constraints = [ {'name': 'test', 'description': 'test', 'start': '0 11 * * *', # daily at 11:00 'duration': 10800, # 3 hours 'timezone': 'Europe/Ljubljana'} ] self.alarms[1].time_constraints = self.alarms[0].time_constraints dt = datetime.datetime(2014, 1, 1, 15, 0, 0, tzinfo=pytz.timezone('Europe/Ljubljana')) mock_utcnow.return_value = dt.astimezone(pytz.UTC) self.api_client.statistics.list.return_value = [] self._evaluate_all_alarms() self._assert_all_alarms('ok') update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual([], update_calls, "Alarm should not change state if the current " " time is outside its time constraint.") self.assertEqual([], self.notifier.notify.call_args_list) aodh-2.0.6/aodh/tests/unit/evaluator/__init__.py0000664000567000056710000000000013076064371022737 0ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/unit/evaluator/test_combination.py0000664000567000056710000003773313076064372024571 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance # # Authors: Mehdi Abaakouk # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for aodh/evaluator/combination.py """ import datetime import uuid from ceilometerclient import exc from ceilometerclient.v2 import alarms import mock from oslo_utils import timeutils import pytz from aodh.evaluator import combination from aodh.storage import models from aodh.tests import constants from aodh.tests.unit.evaluator import base class TestEvaluate(base.TestEvaluatorBase): EVALUATOR = combination.CombinationEvaluator def prepare_alarms(self): self.alarms = [ models.Alarm(name='or-alarm', description='the or alarm', type='combination', enabled=True, user_id='foobar', project_id='snafu', alarm_id=str(uuid.uuid4()), state='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, time_constraints=[], rule=dict( alarm_ids=[ '9cfc3e51-2ff1-4b1d-ac01-c1bd4c6d0d1e', '1d441595-d069-4e05-95ab-8693ba6a8302'], operator='or', ), severity='critical'), models.Alarm(name='and-alarm', description='the and alarm', type='combination', enabled=True, user_id='foobar', project_id='snafu', alarm_id=str(uuid.uuid4()), state='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, time_constraints=[], rule=dict( alarm_ids=[ 'b82734f4-9d06-48f3-8a86-fa59a0c99dc8', '15a700e5-2fe8-4b3d-8c55-9e92831f6a2b'], operator='and', ), severity='critical') ] @staticmethod def _get_alarms(state): return [alarms.Alarm(None, {'state': state})] @staticmethod def _reason_data(alarm_ids): return {'type': 'combination', 'alarm_ids': alarm_ids} def _combination_transition_reason(self, state, alarm_ids1, alarm_ids2): return ([('Transition to %(state)s due to alarms %(alarm_ids)s' ' in state %(state)s') % {'state': state, 'alarm_ids': ",".join(alarm_ids1)}, ('Transition to %(state)s due to alarms %(alarm_ids)s' ' in state %(state)s') % {'state': state, 'alarm_ids': ",".join(alarm_ids2)}], [self._reason_data(alarm_ids1), self._reason_data(alarm_ids2)]) def _combination_remaining_reason(self, state, alarm_ids1, alarm_ids2): return ([('Remaining as %(state)s due to alarms %(alarm_ids)s' ' in state %(state)s') % {'state': state, 'alarm_ids': ",".join(alarm_ids1)}, ('Remaining as %(state)s due to alarms %(alarm_ids)s' ' in state %(state)s') % {'state': state, 'alarm_ids': ",".join(alarm_ids2)}], [self._reason_data(alarm_ids1), self._reason_data(alarm_ids2)]) def test_retry_transient_api_failure(self): broken = exc.CommunicationError(message='broken') self.storage_conn.get_alarms.side_effect = [ broken, broken, broken, broken, self._get_alarms('ok'), self._get_alarms('ok'), self._get_alarms('ok'), self._get_alarms('ok'), ] self._evaluate_all_alarms() self._assert_all_alarms('insufficient data') self._evaluate_all_alarms() self._assert_all_alarms('ok') def test_simple_insufficient(self): self._set_all_alarms('ok') broken = exc.CommunicationError(message='broken') self.storage_conn.get_alarms.side_effect = broken self._evaluate_all_alarms() self._assert_all_alarms('insufficient data') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) expected = [mock.call( alarm, 'ok', ('Alarms %s are in unknown state' % (",".join(alarm.rule['alarm_ids']))), self._reason_data(alarm.rule['alarm_ids'])) for alarm in self.alarms] self.assertEqual(expected, self.notifier.notify.call_args_list) def 
test_to_ok_with_all_ok(self): self._set_all_alarms('insufficient data') self.storage_conn.get_alarms.side_effect = [ self._get_alarms('ok'), self._get_alarms('ok'), self._get_alarms('ok'), self._get_alarms('ok'), ] self._evaluate_all_alarms() expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reasons, reason_datas = self._combination_transition_reason( 'ok', self.alarms[0].rule['alarm_ids'], self.alarms[1].rule['alarm_ids']) expected = [mock.call(alarm, 'insufficient data', reason, reason_data) for alarm, reason, reason_data in zip(self.alarms, reasons, reason_datas)] self.assertEqual(expected, self.notifier.notify.call_args_list) def test_to_ok_with_one_alarm(self): self._set_all_alarms('alarm') self.storage_conn.get_alarms.side_effect = [ self._get_alarms('ok'), self._get_alarms('ok'), self._get_alarms('alarm'), self._get_alarms('ok'), ] self._evaluate_all_alarms() expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reasons, reason_datas = self._combination_transition_reason( 'ok', self.alarms[0].rule['alarm_ids'], [self.alarms[1].rule['alarm_ids'][1]]) expected = [mock.call(alarm, 'alarm', reason, reason_data) for alarm, reason, reason_data in zip(self.alarms, reasons, reason_datas)] self.assertEqual(expected, self.notifier.notify.call_args_list) def test_to_alarm_with_all_alarm(self): self._set_all_alarms('ok') self.storage_conn.get_alarms.side_effect = [ self._get_alarms('alarm'), self._get_alarms('alarm'), self._get_alarms('alarm'), self._get_alarms('alarm'), ] self._evaluate_all_alarms() expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reasons, reason_datas = self._combination_transition_reason( 'alarm', self.alarms[0].rule['alarm_ids'], self.alarms[1].rule['alarm_ids']) expected = [mock.call(alarm, 'ok', reason, reason_data) for alarm, reason, reason_data in zip(self.alarms, reasons, reason_datas)] self.assertEqual(expected, self.notifier.notify.call_args_list) def test_to_alarm_with_one_insufficient_data(self): self._set_all_alarms('ok') self.storage_conn.get_alarms.side_effect = [ self._get_alarms('insufficient data'), self._get_alarms('alarm'), self._get_alarms('alarm'), self._get_alarms('alarm'), ] self._evaluate_all_alarms() expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reasons, reason_datas = self._combination_transition_reason( 'alarm', [self.alarms[0].rule['alarm_ids'][1]], self.alarms[1].rule['alarm_ids']) expected = [mock.call(alarm, 'ok', reason, reason_data) for alarm, reason, reason_data in zip(self.alarms, reasons, reason_datas)] self.assertEqual(expected, self.notifier.notify.call_args_list) def test_to_alarm_with_one_ok(self): self._set_all_alarms('ok') self.storage_conn.get_alarms.side_effect = [ self._get_alarms('ok'), self._get_alarms('alarm'), self._get_alarms('alarm'), self._get_alarms('alarm'), ] self._evaluate_all_alarms() expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reasons, reason_datas = self._combination_transition_reason( 'alarm', [self.alarms[0].rule['alarm_ids'][1]], self.alarms[1].rule['alarm_ids']) expected = 
[mock.call(alarm, 'ok', reason, reason_data) for alarm, reason, reason_data in zip(self.alarms, reasons, reason_datas)] self.assertEqual(expected, self.notifier.notify.call_args_list) def test_to_unknown(self): self._set_all_alarms('ok') broken = exc.CommunicationError(message='broken') self.storage_conn.get_alarms.side_effect = [ broken, self._get_alarms('ok'), self._get_alarms('insufficient data'), self._get_alarms('ok'), ] self._evaluate_all_alarms() expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) reasons = ['Alarms %s are in unknown state' % self.alarms[0].rule['alarm_ids'][0], 'Alarms %s are in unknown state' % self.alarms[1].rule['alarm_ids'][0]] reason_datas = [ self._reason_data([self.alarms[0].rule['alarm_ids'][0]]), self._reason_data([self.alarms[1].rule['alarm_ids'][0]])] expected = [mock.call(alarm, 'ok', reason, reason_data) for alarm, reason, reason_data in zip(self.alarms, reasons, reason_datas)] self.assertEqual(expected, self.notifier.notify.call_args_list) def test_no_state_change(self): self._set_all_alarms('ok') self.storage_conn.get_alarms.side_effect = [ self._get_alarms('ok'), self._get_alarms('ok'), self._get_alarms('ok'), self._get_alarms('ok'), ] self._evaluate_all_alarms() update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual([], update_calls) self.assertEqual([], self.notifier.notify.call_args_list) def test_no_state_change_and_repeat_actions(self): self.alarms[0].repeat_actions = True self.alarms[1].repeat_actions = True self._set_all_alarms('ok') self.storage_conn.get_alarms.side_effect = [ self._get_alarms('ok'), self._get_alarms('ok'), self._get_alarms('ok'), self._get_alarms('ok'), ] self._evaluate_all_alarms() update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual([], update_calls) reasons, reason_datas = self._combination_remaining_reason( 'ok', self.alarms[0].rule['alarm_ids'], self.alarms[1].rule['alarm_ids']) expected = [mock.call(alarm, 'ok', reason, reason_data) for alarm, reason, reason_data in zip(self.alarms, reasons, reason_datas)] self.assertEqual(expected, self.notifier.notify.call_args_list) @mock.patch.object(timeutils, 'utcnow') def test_state_change_inside_time_constraint(self, mock_utcnow): self._set_all_alarms('insufficient data') self.alarms[0].time_constraints = [ {'name': 'test', 'description': 'test', 'start': '0 11 * * *', # daily at 11:00 'duration': 10800, # 3 hours 'timezone': 'Europe/Ljubljana'} ] self.alarms[1].time_constraints = self.alarms[0].time_constraints dt = datetime.datetime(2014, 1, 1, 12, 0, 0, tzinfo=pytz.timezone('Europe/Ljubljana')) mock_utcnow.return_value = dt.astimezone(pytz.UTC) self.storage_conn.get_alarms.side_effect = [ self._get_alarms('ok'), self._get_alarms('ok'), self._get_alarms('ok'), self._get_alarms('ok'), ] self._evaluate_all_alarms() expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls, "Alarm should change state if the current " "time is inside its time constraint.") reasons, reason_datas = self._combination_transition_reason( 'ok', self.alarms[0].rule['alarm_ids'], self.alarms[1].rule['alarm_ids']) expected = [mock.call(alarm, 'insufficient data', reason, reason_data) for alarm, reason, reason_data in zip(self.alarms, reasons, reason_datas)] self.assertEqual(expected, self.notifier.notify.call_args_list) @mock.patch.object(timeutils, 
'utcnow') def test_no_state_change_outside_time_constraint(self, mock_utcnow): self._set_all_alarms('insufficient data') self.alarms[0].time_constraints = [ {'name': 'test', 'description': 'test', 'start': '0 11 * * *', # daily at 11:00 'duration': 10800, # 3 hours 'timezone': 'Europe/Ljubljana'} ] self.alarms[1].time_constraints = self.alarms[0].time_constraints dt = datetime.datetime(2014, 1, 1, 15, 0, 0, tzinfo=pytz.timezone('Europe/Ljubljana')) mock_utcnow.return_value = dt.astimezone(pytz.UTC) self.storage_conn.get_alarms.side_effect = [ self._get_alarms('ok'), self._get_alarms('ok'), self._get_alarms('ok'), self._get_alarms('ok'), ] self._evaluate_all_alarms() update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual([], update_calls, "Alarm should not change state if the current " " time is outside its time constraint.") self.assertEqual([], self.notifier.notify.call_args_list) aodh-2.0.6/aodh/tests/unit/evaluator/base.py0000664000567000056710000000403313076064372022125 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import fixture from oslotest import base from oslotest import mockpatch from aodh import service class TestEvaluatorBase(base.BaseTestCase): def setUp(self): super(TestEvaluatorBase, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.conf = self.useFixture(fixture.Config(conf)).conf self.api_client = mock.Mock() self.useFixture(mockpatch.Patch('ceilometerclient.client.get_client', return_value=self.api_client)) self.evaluator = self.EVALUATOR(self.conf) self.notifier = mock.MagicMock() self.evaluator.notifier = self.notifier self.storage_conn = mock.MagicMock() self.evaluator.storage_conn = self.storage_conn self.evaluator._ks_client = mock.Mock(user_id='fake_user_id', project_id='fake_project_id', auth_token='fake_token') self.prepare_alarms() def prepare_alarms(self): self.alarms = [] def _evaluate_all_alarms(self): for alarm in self.alarms: self.evaluator.evaluate(alarm) def _set_all_alarms(self, state): for alarm in self.alarms: alarm.state = state def _assert_all_alarms(self, state): for alarm in self.alarms: self.assertEqual(state, alarm.state) aodh-2.0.6/aodh/tests/unit/evaluator/test_composite.py0000664000567000056710000004405413076064372024263 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for aodh/evaluator/composite.py """ import uuid from ceilometerclient.v2 import statistics import mock from oslo_utils import timeutils from oslotest import mockpatch import six from six import moves from aodh import evaluator from aodh.evaluator import composite from aodh.storage import models from aodh.tests import constants from aodh.tests.unit.evaluator import base class TestEvaluate(base.TestEvaluatorBase): EVALUATOR = composite.CompositeEvaluator sub_rule1 = { "type": "threshold", "meter_name": "cpu_util", "evaluation_periods": 5, "threshold": 0.8, "query": [{ "field": "metadata.metering.stack_id", "value": "36b20eb3-d749-4964-a7d2-a71147cd8145", "op": "eq" }], "statistic": "avg", "period": 60, "exclude_outliers": False, "comparison_operator": "gt" } sub_rule2 = { "type": "threshold", "meter_name": "disk.iops", "evaluation_periods": 4, "threshold": 200, "query": [{ "field": "metadata.metering.stack_id", "value": "36b20eb3-d749-4964-a7d2-a71147cd8145", "op": "eq" }], "statistic": "max", "period": 60, "exclude_outliers": False, "comparison_operator": "gt" } sub_rule3 = { "type": "threshold", "meter_name": "network.incoming.packets.rate", "evaluation_periods": 3, "threshold": 1000, "query": [{ "field": "metadata.metering.stack_id", "value": "36b20eb3-d749-4964-a7d2-a71147cd8145", "op": "eq" }], "statistic": "avg", "period": 60, "exclude_outliers": False, "comparison_operator": "gt" } sub_rule4 = { "type": "gnocchi_resources_threshold", 'comparison_operator': 'gt', 'threshold': 80.0, 'evaluation_periods': 5, 'aggregation_method': 'mean', 'granularity': 60, 'metric': 'cpu_util', 'resource_type': 'instance', 'resource_id': 'my_instance', } sub_rule5 = { "type": "gnocchi_aggregation_by_metrics_threshold", 'comparison_operator': 'le', 'threshold': 10.0, 'evaluation_periods': 4, 'aggregation_method': 'max', 'granularity': 300, 'metrics': ['0bb1604d-1193-4c0a-b4b8-74b170e35e83', '9ddc209f-42f8-41e1-b8f1-8804f59c4053'] } sub_rule6 = { "type": "gnocchi_aggregation_by_resources_threshold", 'comparison_operator': 'gt', 'threshold': 80.0, 'evaluation_periods': 6, 'aggregation_method': 'mean', 'granularity': 50, 'metric': 'cpu_util', 'resource_type': 'instance', 'query': '{"=": {"server_group": "my_autoscaling_group"}}' } def prepare_alarms(self): self.alarms = [ models.Alarm(name='alarm_threshold_nest', description='alarm with sub rules nested combined', type='composite', enabled=True, user_id='fake_user', project_id='fake_project', alarm_id=str(uuid.uuid4()), state='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, time_constraints=[], rule={ "or": [self.sub_rule1, {"and": [self.sub_rule2, self.sub_rule3] }] }, severity='critical'), models.Alarm(name='alarm_threshold_or', description='alarm on one of sub rules triggered', type='composite', enabled=True, user_id='fake_user', project_id='fake_project', state='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, alarm_id=str(uuid.uuid4()), time_constraints=[], rule={ "or": [self.sub_rule1, self.sub_rule2, self.sub_rule3] }, severity='critical' ), models.Alarm(name='alarm_threshold_and', description='alarm on all the sub rules triggered', type='composite', enabled=True, user_id='fake_user', project_id='fake_project', state='insufficient data', state_timestamp=constants.MIN_DATETIME, 
timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, alarm_id=str(uuid.uuid4()), time_constraints=[], rule={ "and": [self.sub_rule1, self.sub_rule2, self.sub_rule3] }, severity='critical' ), models.Alarm(name='alarm_multi_type_rules', description='alarm with threshold and gnocchi rules', type='composite', enabled=True, user_id='fake_user', project_id='fake_project', alarm_id=str(uuid.uuid4()), state='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, insufficient_data_actions=[], ok_actions=[], alarm_actions=[], repeat_actions=False, time_constraints=[], rule={ "and": [self.sub_rule2, self.sub_rule3, {'or': [self.sub_rule1, self.sub_rule4, self.sub_rule5, self.sub_rule6]}] }, severity='critical' ), ] def setUp(self): self.client = self.useFixture(mockpatch.Patch( 'aodh.evaluator.gnocchi.client' )).mock.Client.return_value super(TestEvaluate, self).setUp() @staticmethod def _get_stats(attr, value, count=1): return statistics.Statistics(None, {attr: value, 'count': count}) @staticmethod def _get_gnocchi_stats(granularity, values): now = timeutils.utcnow_ts() return [[six.text_type(now - len(values) * granularity), granularity, value] for value in values] @staticmethod def _reason(new_state, user_expression, causative_rules=(), transition=True): root_cause_rules = {} for index, rule in causative_rules: name = 'rule%s' % index root_cause_rules.update({name: rule}) description = {evaluator.ALARM: 'outside their threshold.', evaluator.OK: 'inside their threshold.', evaluator.UNKNOWN: 'state evaluated to unknown.'} params = {'state': new_state, 'expression': user_expression, 'rules': ', '.join(sorted(six.iterkeys(root_cause_rules))), 'description': description[new_state]} reason_data = { 'type': 'composite', 'composition_form': user_expression} reason_data.update(causative_rules=root_cause_rules) if transition: reason = ('Composite rule alarm with composition form: ' '%(expression)s transition to %(state)s, due to ' 'rules: %(rules)s %(description)s' % params) else: reason = ('Composite rule alarm with composition form: ' '%(expression)s remaining as %(state)s, due to ' 'rules: %(rules)s %(description)s' % params) return reason, reason_data def test_simple_insufficient(self): self._set_all_alarms('ok') self.api_client.statistics.list.return_value = [] self.client.metric.aggregation.return_value = [] self.client.metric.get_measures.return_value = [] self._evaluate_all_alarms() self._assert_all_alarms('insufficient data') expected = [mock.call(alarm) for alarm in self.alarms] update_calls = self.storage_conn.update_alarm.call_args_list self.assertEqual(expected, update_calls) expected = [mock.call(self.alarms[0], 'ok', *self._reason( 'insufficient data', '(rule1 or (rule2 and rule3))', ((1, self.sub_rule1), (2, self.sub_rule2), (3, self.sub_rule3)))), mock.call(self.alarms[1], 'ok', *self._reason( 'insufficient data', '(rule1 or rule2 or rule3)', ((1, self.sub_rule1), (2, self.sub_rule2), (3, self.sub_rule3)))), mock.call(self.alarms[2], 'ok', *self._reason( 'insufficient data', '(rule1 and rule2 and rule3)', ((1, self.sub_rule1), (2, self.sub_rule2), (3, self.sub_rule3)))), mock.call( self.alarms[3], 'ok', *self._reason( 'insufficient data', '(rule1 and rule2 and (rule3 or rule4 or rule5 ' 'or rule6))', ((1, self.sub_rule2), (2, self.sub_rule3), (3, self.sub_rule1), (4, self.sub_rule4), (5, self.sub_rule5), (6, self.sub_rule6))))] self.assertEqual(expected, 
self.notifier.notify.call_args_list) def test_alarm_full_trip_with_multi_type_rules(self): alarm = self.alarms[3] alarm.state = 'ok' # following results of sub-rules evaluation to trigger # final "alarm" state: # self.sub_rule2: alarm # self.sub_rule3: alarm # self.sub_rule1: ok # self.sub_rule4: ok # self.sub_rule5: ok # self.sub_rule6: alarm maxs = [self._get_stats('max', self.sub_rule2['threshold'] + 0.01 * v) for v in moves.xrange(1, 5)] avgs1 = [self._get_stats('avg', self.sub_rule3['threshold'] + 0.01 * v) for v in moves.xrange(1, 4)] avgs2 = [self._get_stats('avg', self.sub_rule1['threshold'] - 0.01 * v) for v in moves.xrange(1, 6)] gavgs1 = self._get_gnocchi_stats(60, [self.sub_rule4['threshold'] - v for v in moves.xrange(1, 6)]) gmaxs = self._get_gnocchi_stats(300, [self.sub_rule5['threshold'] + v for v in moves.xrange(1, 5)]) gavgs2 = self._get_gnocchi_stats(50, [self.sub_rule6['threshold'] + v for v in moves.xrange(1, 7)]) self.api_client.statistics.list.side_effect = [maxs, avgs1, avgs2] self.client.metric.get_measures.side_effect = [gavgs1] self.client.metric.aggregation.side_effect = [gmaxs, gavgs2] self.evaluator.evaluate(alarm) self.assertEqual(3, self.api_client.statistics.list.call_count) self.assertEqual(1, self.client.metric.get_measures.call_count) self.assertEqual(2, self.client.metric.aggregation.call_count) self.assertEqual('alarm', alarm.state) expected = mock.call( alarm, 'ok', *self._reason( 'alarm', '(rule1 and rule2 and (rule3 or rule4 or rule5 or rule6))', ((1, self.sub_rule2), (2, self.sub_rule3), (6, self.sub_rule6)))) self.assertEqual(expected, self.notifier.notify.call_args) def test_alarm_with_short_circuit_logic(self): alarm = self.alarms[1] # self.sub_rule1: alarm avgs = [self._get_stats('avg', self.sub_rule1['threshold'] + 0.01 * v) for v in moves.xrange(1, 6)] self.api_client.statistics.list.side_effect = [avgs] self.evaluator.evaluate(alarm) self.assertEqual('alarm', alarm.state) self.assertEqual(1, self.api_client.statistics.list.call_count) expected = mock.call(self.alarms[1], 'insufficient data', *self._reason( 'alarm', '(rule1 or rule2 or rule3)', ((1, self.sub_rule1),))) self.assertEqual(expected, self.notifier.notify.call_args) def test_ok_with_short_circuit_logic(self): alarm = self.alarms[2] # self.sub_rule1: ok avgs = [self._get_stats('avg', self.sub_rule1['threshold'] - 0.01 * v) for v in moves.xrange(1, 6)] self.api_client.statistics.list.side_effect = [avgs] self.evaluator.evaluate(alarm) self.assertEqual('ok', alarm.state) self.assertEqual(1, self.api_client.statistics.list.call_count) expected = mock.call(self.alarms[2], 'insufficient data', *self._reason( 'ok', '(rule1 and rule2 and rule3)', ((1, self.sub_rule1),))) self.assertEqual(expected, self.notifier.notify.call_args) def test_unknown_state_with_sub_rules_trending_state(self): alarm = self.alarms[0] maxs = [self._get_stats('max', self.sub_rule2['threshold'] + 0.01 * v) for v in moves.xrange(-1, 4)] avgs = [self._get_stats('avg', self.sub_rule3['threshold'] + 0.01 * v) for v in moves.xrange(-1, 3)] avgs2 = [self._get_stats('avg', self.sub_rule1['threshold'] - 0.01 * v) for v in moves.xrange(1, 6)] self.api_client.statistics.list.side_effect = [avgs2, maxs, avgs] self.evaluator.evaluate(alarm) self.assertEqual('alarm', alarm.state) expected = mock.call(self.alarms[0], 'insufficient data', *self._reason( 'alarm', '(rule1 or (rule2 and rule3))', ((2, self.sub_rule2), (3, self.sub_rule3)))) self.assertEqual(expected, self.notifier.notify.call_args) def 
test_known_state_with_sub_rules_trending_state(self): alarm = self.alarms[0] alarm.repeat_actions = True alarm.state = 'ok' maxs = [self._get_stats('max', self.sub_rule2['threshold'] + 0.01 * v) for v in moves.xrange(-1, 4)] avgs = [self._get_stats('avg', self.sub_rule3['threshold'] + 0.01 * v) for v in moves.xrange(-1, 3)] avgs2 = [self._get_stats('avg', self.sub_rule1['threshold'] - 0.01 * v) for v in moves.xrange(1, 6)] self.api_client.statistics.list.side_effect = [avgs2, maxs, avgs] self.evaluator.evaluate(alarm) self.assertEqual('ok', alarm.state) expected = mock.call(self.alarms[0], 'ok', *self._reason( 'ok', '(rule1 or (rule2 and rule3))', ((1, self.sub_rule1), (2, self.sub_rule2), (3, self.sub_rule3)), False)) self.assertEqual(expected, self.notifier.notify.call_args) def test_known_state_with_sub_rules_trending_state_and_not_repeat(self): alarm = self.alarms[2] alarm.state = 'ok' maxs = [self._get_stats('max', self.sub_rule2['threshold'] + 0.01 * v) for v in moves.xrange(-1, 4)] avgs = [self._get_stats('avg', self.sub_rule3['threshold'] + 0.01 * v) for v in moves.xrange(-1, 3)] avgs2 = [self._get_stats('avg', self.sub_rule1['threshold'] - 0.01 * v) for v in moves.xrange(1, 6)] self.api_client.statistics.list.side_effect = [avgs2, maxs, avgs] self.evaluator.evaluate(alarm) self.assertEqual('ok', alarm.state) self.assertEqual([], self.notifier.notify.mock_calls) aodh-2.0.6/aodh/tests/unit/test_bin.py0000664000567000056710000001641313076064372021025 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # # Copyright 2012-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import random import subprocess import time from oslo_utils import fileutils import requests import six from aodh.tests import base class BinTestCase(base.BaseTestCase): def setUp(self): super(BinTestCase, self).setUp() content = ("[DEFAULT]\n" "rpc_backend=fake\n" "[database]\n" "connection=log://localhost\n") if six.PY3: content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='aodh', suffix='.conf') def tearDown(self): super(BinTestCase, self).tearDown() os.remove(self.tempfile) def test_dbsync_run(self): subp = subprocess.Popen(['aodh-dbsync', "--config-file=%s" % self.tempfile]) self.assertEqual(0, subp.wait()) def test_run_expirer_ttl_disabled(self): subp = subprocess.Popen(['aodh-expirer', '-d', "--config-file=%s" % self.tempfile], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, __ = subp.communicate() self.assertEqual(0, subp.poll()) self.assertIn(b"Nothing to clean, database alarm history " b"time to live is disabled", out) def test_run_expirer_ttl_enabled(self): content = ("[DEFAULT]\n" "rpc_backend=fake\n" "[database]\n" "alarm_history_time_to_live=1\n" "connection=log://localhost\n") if six.PY3: content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='aodh', suffix='.conf') subp = subprocess.Popen(['aodh-expirer', '-d', "--config-file=%s" % self.tempfile], stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, __ = subp.communicate() self.assertEqual(0, subp.poll()) msg = "Dropping alarm history data with TTL 1" if six.PY3: msg = msg.encode('utf-8') self.assertIn(msg, out) class BinApiTestCase(base.BaseTestCase): def setUp(self): super(BinApiTestCase, self).setUp() # create api_paste.ini file without authentication content = ("[pipeline:main]\n" "pipeline = api-server\n" "[app:api-server]\n" "paste.app_factory = aodh.api.app:app_factory\n") if six.PY3: content = content.encode('utf-8') self.paste = fileutils.write_to_tempfile(content=content, prefix='api_paste', suffix='.ini') # create aodh.conf file self.api_port = random.randint(10000, 11000) self.pipeline_cfg_file = self.path_get('etc/aodh/pipeline.yaml') self.policy_file = self.path_get('aodh/tests/open-policy.json') def tearDown(self): super(BinApiTestCase, self).tearDown() try: self.subp.kill() self.subp.wait() except OSError: pass os.remove(self.tempfile) def get_response(self, path): url = 'http://%s:%d/%s' % ('127.0.0.1', self.api_port, path) for x in range(10): try: r = requests.get(url) except requests.exceptions.ConnectionError: time.sleep(.5) self.assertIsNone(self.subp.poll()) else: return r return None def run_api(self, content, err_pipe=None): if six.PY3: content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='aodh', suffix='.conf') if err_pipe: return subprocess.Popen(['aodh-api', "--config-file=%s" % self.tempfile], stderr=subprocess.PIPE) else: return subprocess.Popen(['aodh-api', "--config-file=%s" % self.tempfile]) def test_v2(self): content = ("[DEFAULT]\n" "rpc_backend=fake\n" "auth_strategy=noauth\n" "debug=true\n" "pipeline_cfg_file={0}\n" "[api]\n" "paste_config={2}\n" "port={3}\n" "[oslo_policy]\n" "policy_file={1}\n" "[database]\n" "connection=log://localhost\n". 
format(self.pipeline_cfg_file, self.policy_file, self.paste, self.api_port)) self.subp = self.run_api(content) response = self.get_response('v2/alarms') self.assertEqual(200, response.status_code) self.assertEqual([], response.json()) class BinEvaluatorTestCase(base.BaseTestCase): def setUp(self): super(BinEvaluatorTestCase, self).setUp() content = ("[DEFAULT]\n" "rpc_backend=fake\n" "[database]\n" "connection=log://localhost\n") if six.PY3: content = content.encode('utf-8') self.tempfile = fileutils.write_to_tempfile(content=content, prefix='aodh', suffix='.conf') self.subp = None def tearDown(self): super(BinEvaluatorTestCase, self).tearDown() if self.subp: self.subp.kill() os.remove(self.tempfile) def test_starting_evaluator(self): self.subp = subprocess.Popen(['aodh-evaluator', "--config-file=%s" % self.tempfile], stderr=subprocess.PIPE) self.assertIsNone(self.subp.poll()) class BinNotifierTestCase(BinEvaluatorTestCase): def test_starting_notifier(self): self.subp = subprocess.Popen(['aodh-notifier', "--config-file=%s" % self.tempfile], stderr=subprocess.PIPE) self.assertIsNone(self.subp.poll()) aodh-2.0.6/aodh/tests/unit/test_notifier.py0000664000567000056710000003060313076064372022071 0ustar jenkinsjenkins00000000000000# # Copyright 2013-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from oslo_config import fixture as fixture_config from oslo_serialization import jsonutils from oslotest import mockpatch import requests import six.moves.urllib.parse as urlparse from aodh import notifier from aodh import service from aodh.tests import base as tests_base DATA_JSON = jsonutils.loads( '{"current": "ALARM", "alarm_id": "foobar", "alarm_name": "testalarm",' ' "severity": "critical", "reason": "what ?",' ' "reason_data": {"test": "test"}, "previous": "OK"}' ) NOTIFICATION = dict(alarm_id='foobar', alarm_name='testalarm', severity='critical', condition=dict(threshold=42), reason='what ?', reason_data={'test': 'test'}, previous='OK', current='ALARM') class TestAlarmNotifierService(tests_base.BaseTestCase): def setUp(self): super(TestAlarmNotifierService, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf self.setup_messaging(self.CONF) def test_init_host_rpc(self): self.CONF.set_override('ipc_protocol', 'rpc') self.service = notifier.AlarmNotifierService(self.CONF) self.service.start() self.service.stop() def test_init_host_queue(self): self.service = notifier.AlarmNotifierService(self.CONF) self.service.start() self.service.stop() class TestAlarmNotifier(tests_base.BaseTestCase): def setUp(self): super(TestAlarmNotifier, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf self.setup_messaging(self.CONF) self.zaqar = FakeZaqarClient(self) self.useFixture(mockpatch.Patch( 'aodh.notifier.zaqar.ZaqarAlarmNotifier.get_zaqar_client', return_value=self.zaqar)) self.service = notifier.AlarmNotifierService(self.CONF) self.useFixture(mockpatch.Patch( 'oslo_context.context.generate_request_id', self._fake_generate_request_id)) def test_notify_alarm(self): data = { 'actions': ['test://'], 'alarm_id': 'foobar', 'alarm_name': 'testalarm', 'severity': 'critical', 'previous': 'OK', 'current': 'ALARM', 'reason': 'Everything is on fire', 'reason_data': {'fire': 'everywhere'} } self.service.notify_alarm({}, data) notifications = self.service.notifiers['test'].obj.notifications self.assertEqual(1, len(notifications)) self.assertEqual((urlparse.urlsplit(data['actions'][0]), data['alarm_id'], data['alarm_name'], data['severity'], data['previous'], data['current'], data['reason'], data['reason_data']), notifications[0]) def test_notify_alarm_no_action(self): self.service.notify_alarm({}, {}) def test_notify_alarm_log_action(self): self.service.notify_alarm({}, { 'actions': ['log://'], 'alarm_id': 'foobar', 'condition': {'threshold': 42}}) @staticmethod def _notification(action): notification = {} notification.update(NOTIFICATION) notification['actions'] = [action] return notification HTTP_HEADERS = {'x-openstack-request-id': 'fake_request_id', 'content-type': 'application/json'} def _fake_generate_request_id(self): return self.HTTP_HEADERS['x-openstack-request-id'] def test_notify_alarm_rest_action_ok(self): action = 'http://host/action' with mock.patch.object(requests.Session, 'post') as poster: self.service.notify_alarm({}, self._notification(action)) poster.assert_called_with(action, data=mock.ANY, headers=mock.ANY) args, kwargs = poster.call_args self.assertEqual(self.HTTP_HEADERS, kwargs['headers']) self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) def test_notify_alarm_rest_action_with_ssl_client_cert(self): action = 'https://host/action' certificate = "/etc/ssl/cert/whatever.pem" 
self.CONF.set_override("rest_notifier_certificate_file", certificate) with mock.patch.object(requests.Session, 'post') as poster: self.service.notify_alarm({}, self._notification(action)) poster.assert_called_with(action, data=mock.ANY, headers=mock.ANY, cert=certificate, verify=True) args, kwargs = poster.call_args self.assertEqual(self.HTTP_HEADERS, kwargs['headers']) self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) def test_notify_alarm_rest_action_with_ssl_client_cert_and_key(self): action = 'https://host/action' certificate = "/etc/ssl/cert/whatever.pem" key = "/etc/ssl/cert/whatever.key" self.CONF.set_override("rest_notifier_certificate_file", certificate) self.CONF.set_override("rest_notifier_certificate_key", key) with mock.patch.object(requests.Session, 'post') as poster: self.service.notify_alarm({}, self._notification(action)) poster.assert_called_with(action, data=mock.ANY, headers=mock.ANY, cert=(certificate, key), verify=True) args, kwargs = poster.call_args self.assertEqual(self.HTTP_HEADERS, kwargs['headers']) self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) def test_notify_alarm_rest_action_with_ssl_verify_disable_by_cfg(self): action = 'https://host/action' self.CONF.set_override("rest_notifier_ssl_verify", False) with mock.patch.object(requests.Session, 'post') as poster: self.service.notify_alarm({}, self._notification(action)) poster.assert_called_with(action, data=mock.ANY, headers=mock.ANY, verify=False) args, kwargs = poster.call_args self.assertEqual(self.HTTP_HEADERS, kwargs['headers']) self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) def test_notify_alarm_rest_action_with_ssl_verify_disable(self): action = 'https://host/action?aodh-alarm-ssl-verify=0' with mock.patch.object(requests.Session, 'post') as poster: self.service.notify_alarm({}, self._notification(action)) poster.assert_called_with(action, data=mock.ANY, headers=mock.ANY, verify=False) args, kwargs = poster.call_args self.assertEqual(self.HTTP_HEADERS, kwargs['headers']) self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) def test_notify_alarm_rest_action_with_ssl_verify_enable_by_user(self): action = 'https://host/action?aodh-alarm-ssl-verify=1' self.CONF.set_override("rest_notifier_ssl_verify", False) with mock.patch.object(requests.Session, 'post') as poster: self.service.notify_alarm({}, self._notification(action)) poster.assert_called_with(action, data=mock.ANY, headers=mock.ANY, verify=True) args, kwargs = poster.call_args self.assertEqual(self.HTTP_HEADERS, kwargs['headers']) self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) @staticmethod def _fake_urlsplit(*args, **kwargs): raise Exception("Evil urlsplit!") def test_notify_alarm_invalid_url(self): with mock.patch('oslo_utils.netutils.urlsplit', self._fake_urlsplit): LOG = mock.MagicMock() with mock.patch('aodh.notifier.LOG', LOG): self.service.notify_alarm( {}, { 'actions': ['no-such-action-i-am-sure'], 'alarm_id': 'foobar', 'condition': {'threshold': 42}, }) self.assertTrue(LOG.error.called) def test_notify_alarm_invalid_action(self): LOG = mock.MagicMock() with mock.patch('aodh.notifier.LOG', LOG): self.service.notify_alarm( {}, { 'actions': ['no-such-action-i-am-sure://'], 'alarm_id': 'foobar', 'condition': {'threshold': 42}, }) self.assertTrue(LOG.error.called) def test_notify_alarm_trust_action(self): action = 'trust+http://trust-1234@host/action' url = 'http://host/action' client = mock.MagicMock() client.session.auth.get_access.return_value.auth_token = 'token_1234' headers = 
{'X-Auth-Token': 'token_1234'} headers.update(self.HTTP_HEADERS) self.useFixture( mockpatch.Patch('aodh.keystone_client.get_trusted_client', lambda *args: client)) with mock.patch.object(requests.Session, 'post') as poster: self.service.notify_alarm({}, self._notification(action)) headers = {'X-Auth-Token': 'token_1234'} headers.update(self.HTTP_HEADERS) poster.assert_called_with( url, data=mock.ANY, headers=mock.ANY) args, kwargs = poster.call_args self.assertEqual(headers, kwargs['headers']) self.assertEqual(DATA_JSON, jsonutils.loads(kwargs['data'])) def test_zaqar_notifier_action(self): action = 'zaqar://?topic=critical&subscriber=http://example.com/data' \ '&subscriber=mailto:foo@example.com&ttl=7200' self.service.notify_alarm({}, self._notification(action)) self.assertEqual(self.zaqar, self.service.notifiers['zaqar'].obj.client) class FakeZaqarClient(object): def __init__(self, testcase): self.client = testcase def queue(self, queue_name, **kwargs): self.client.assertEqual('foobar-critical', queue_name) self.client.assertEqual(dict(force_create=True), kwargs) return FakeZaqarQueue(self.client) def subscription(self, queue_name, **kwargs): self.client.assertEqual('foobar-critical', queue_name) subscribers = ['http://example.com/data', 'mailto:foo@example.com'] self.client.assertIn(kwargs['subscriber'], subscribers) self.client.assertEqual('7200', kwargs['ttl']) class FakeZaqarQueue(object): def __init__(self, testcase): self.queue = testcase def post(self, message): expected_message = {'body': {'alarm_name': 'testalarm', 'reason_data': {'test': 'test'}, 'current': 'ALARM', 'alarm_id': 'foobar', 'reason': 'what ?', 'severity': 'critical', 'previous': 'OK'}} self.queue.assertEqual(expected_message, message) aodh-2.0.6/aodh/tests/unit/test_storage.py0000664000567000056710000000241313076064372021714 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslotest import base from aodh.storage import base as storage_base class TestUtils(base.BaseTestCase): def test_dict_to_kv(self): data = {'a': 'A', 'b': 'B', 'nested': {'a': 'A', 'b': 'B', }, 'nested2': [{'c': 'A'}, {'c': 'B'}] } pairs = list(storage_base.dict_to_keyval(data)) self.assertEqual([('a', 'A'), ('b', 'B'), ('nested.a', 'A'), ('nested.b', 'B'), ('nested2[0].c', 'A'), ('nested2[1].c', 'B')], sorted(pairs, key=lambda x: x[0])) aodh-2.0.6/aodh/tests/unit/test_rpc.py0000664000567000056710000001342413076064372021040 0ustar jenkinsjenkins00000000000000# # Copyright 2013-2014 eNovance # # Authors: Mehdi Abaakouk # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import uuid from ceilometerclient.v2 import alarms from oslo_config import fixture as fixture_config import six from aodh import messaging from aodh import rpc from aodh import service from aodh.storage import models from aodh.tests import base as tests_base class FakeNotifier(object): def __init__(self, conf, transport): self.rpc = messaging.get_rpc_server( conf, transport, "alarm_notifier", self) self.notified = [] def start(self, expected_length): self.expected_length = expected_length self.rpc.start() def notify_alarm(self, context, data): self.notified.append(data) if len(self.notified) == self.expected_length: self.rpc.stop() class TestRPCAlarmNotifier(tests_base.BaseTestCase): def setUp(self): super(TestRPCAlarmNotifier, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf self.setup_messaging(self.CONF) self.notifier_server = FakeNotifier(self.CONF, self.transport) self.notifier = rpc.RPCAlarmNotifier(self.CONF) self.alarms = [ alarms.Alarm(None, info={ 'name': 'instance_running_hot', 'meter_name': 'cpu_util', 'comparison_operator': 'gt', 'threshold': 80.0, 'evaluation_periods': 5, 'statistic': 'avg', 'state': 'ok', 'ok_actions': ['http://host:8080/path'], 'user_id': 'foobar', 'project_id': 'snafu', 'period': 60, 'alarm_id': str(uuid.uuid4()), 'severity': 'critical', 'matching_metadata':{'resource_id': 'my_instance'} }), alarms.Alarm(None, info={ 'name': 'group_running_idle', 'meter_name': 'cpu_util', 'comparison_operator': 'le', 'threshold': 10.0, 'statistic': 'max', 'evaluation_periods': 4, 'state': 'insufficient data', 'insufficient_data_actions': ['http://other_host/path'], 'user_id': 'foobar', 'project_id': 'snafu', 'period': 300, 'alarm_id': str(uuid.uuid4()), 'severity': 'critical', 'matching_metadata':{'metadata.user_metadata.AS': 'my_group'} }), ] def test_rpc_target(self): topic = self.notifier.client.target.topic self.assertEqual('alarm_notifier', topic) def test_notify_alarm(self): self.notifier_server.start(2) previous = ['alarm', 'ok'] for i, a in enumerate(self.alarms): self.notifier.notify(a, previous[i], "what? %d" % i, {'fire': '%d' % i}) self.notifier_server.rpc.wait() self.assertEqual(2, len(self.notifier_server.notified)) for i, a in enumerate(self.alarms): actions = getattr(a, models.Alarm.ALARM_ACTIONS_MAP[a.state]) self.assertEqual(self.alarms[i].alarm_id, self.notifier_server.notified[i]["alarm_id"]) self.assertEqual(self.alarms[i].name, self.notifier_server.notified[i]["alarm_name"]) self.assertEqual(self.alarms[i].severity, self.notifier_server.notified[i]["severity"]) self.assertEqual(actions, self.notifier_server.notified[i]["actions"]) self.assertEqual(previous[i], self.notifier_server.notified[i]["previous"]) self.assertEqual(self.alarms[i].state, self.notifier_server.notified[i]["current"]) self.assertEqual("what? 
%d" % i, self.notifier_server.notified[i]["reason"]) self.assertEqual({'fire': '%d' % i}, self.notifier_server.notified[i]["reason_data"]) def test_notify_non_string_reason(self): self.notifier_server.start(1) self.notifier.notify(self.alarms[0], 'ok', 42, {}) self.notifier_server.rpc.wait() reason = self.notifier_server.notified[0]['reason'] self.assertIsInstance(reason, six.string_types) def test_notify_no_actions(self): alarm = alarms.Alarm(None, info={ 'name': 'instance_running_hot', 'meter_name': 'cpu_util', 'comparison_operator': 'gt', 'threshold': 80.0, 'evaluation_periods': 5, 'statistic': 'avg', 'state': 'ok', 'user_id': 'foobar', 'project_id': 'snafu', 'period': 60, 'ok_actions': [], 'alarm_id': str(uuid.uuid4()), 'matching_metadata': {'resource_id': 'my_instance'} }) self.notifier.notify(alarm, 'alarm', "what?", {}) self.assertEqual(0, len(self.notifier_server.notified)) aodh-2.0.6/aodh/tests/__init__.py0000664000567000056710000000000013076064371017756 0ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/functional/0000775000567000056710000000000013076064720020017 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/functional/db.py0000664000567000056710000001501313076064372020761 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 eNovance # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Base classes for API tests.""" import os import uuid import fixtures import mock from oslo_config import fixture as fixture_config from oslotest import mockpatch import six from six.moves.urllib import parse as urlparse from testtools import testcase from aodh import service from aodh import storage from aodh.tests import base as test_base try: from aodh.tests import mocks except ImportError: mocks = None # happybase module is not Python 3 compatible yet class MongoDbManager(fixtures.Fixture): def __init__(self, conf): self.url = '%(url)s_%(db)s' % { 'url': conf.database.connection, 'db': uuid.uuid4().hex, } class SQLManager(fixtures.Fixture): def __init__(self, conf): self.conf = conf db_name = 'aodh_%s' % uuid.uuid4().hex import sqlalchemy self._engine = sqlalchemy.create_engine(conf.database.connection) self._conn = self._engine.connect() self._create_db(self._conn, db_name) self._conn.close() self._engine.dispose() parsed = list(urlparse.urlparse(conf.database.connection)) # NOTE(jd) We need to set an host otherwise urlunparse() will not # construct a proper URL if parsed[1] == '': parsed[1] = 'localhost' parsed[2] = '/' + db_name self.url = urlparse.urlunparse(parsed) class PgSQLManager(SQLManager): @staticmethod def _create_db(conn, db_name): conn.connection.set_isolation_level(0) conn.execute('CREATE DATABASE %s WITH TEMPLATE template0;' % db_name) conn.connection.set_isolation_level(1) class MySQLManager(SQLManager): @staticmethod def _create_db(conn, db_name): conn.execute('CREATE DATABASE %s;' % db_name) class HBaseManager(fixtures.Fixture): def __init__(self, conf): self.url = '%s?table_prefix=%s' % ( conf.database.connection, os.getenv("AODH_TEST_HBASE_TABLE_PREFIX", "test") ) def setUp(self): super(HBaseManager, self).setUp() # Unique prefix for each test to keep data is distinguished because # all test data is stored in one table data_prefix = str(uuid.uuid4().hex) def table(conn, name): return mocks.MockHBaseTable(name, conn, data_prefix) # Mock only real HBase connection, MConnection "table" method # stays origin. mock.patch('happybase.Connection.table', new=table).start() # We shouldn't delete data and tables after each test, # because it last for too long. # All tests tables will be deleted in setup-test-env.sh mock.patch("happybase.Connection.disable_table", new=mock.MagicMock()).start() mock.patch("happybase.Connection.delete_table", new=mock.MagicMock()).start() mock.patch("happybase.Connection.create_table", new=mock.MagicMock()).start() class SQLiteManager(fixtures.Fixture): def __init__(self, conf): self.url = "sqlite://" @six.add_metaclass(test_base.SkipNotImplementedMeta) class TestBase(test_base.BaseTestCase): DRIVER_MANAGERS = { 'mongodb': MongoDbManager, 'mysql': MySQLManager, 'postgresql': PgSQLManager, 'sqlite': SQLiteManager, } if mocks is not None: DRIVER_MANAGERS['hbase'] = HBaseManager def setUp(self): super(TestBase, self).setUp() db_url = os.environ.get( 'AODH_TEST_STORAGE_URL', os.environ.get( "OVERTEST_URL", 'sqlite://').replace( "mysql://", "mysql+pymysql://")) engine = urlparse.urlparse(db_url).scheme # In case some drivers have additional specification, for example: # PyMySQL will have scheme mysql+pymysql. 
engine = engine.split('+')[0] # NOTE(Alexei_987) Shortcut to skip expensive db setUp test_method = self._get_test_method() if (hasattr(test_method, '_run_with') and engine not in test_method._run_with): raise testcase.TestSkipped( 'Test is not applicable for %s' % engine) conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf self.CONF.set_override('connection', db_url, group="database", enforce_type=True) manager = self.DRIVER_MANAGERS.get(engine) if not manager: self.skipTest("missing driver manager: %s" % engine) self.db_manager = manager(self.CONF) self.useFixture(self.db_manager) self.CONF.set_override('connection', self.db_manager.url, group="database", enforce_type=True) self.alarm_conn = storage.get_connection_from_config(self.CONF) self.alarm_conn.upgrade() self.useFixture(mockpatch.Patch( 'aodh.storage.get_connection_from_config', side_effect=self._get_connection)) def tearDown(self): self.alarm_conn.clear() self.alarm_conn = None super(TestBase, self).tearDown() def _get_connection(self, conf): return self.alarm_conn def run_with(*drivers): """Used to mark tests that are only applicable for certain db driver. Skips test if driver is not available. """ def decorator(test): if isinstance(test, type) and issubclass(test, TestBase): # Decorate all test methods for attr in dir(test): if attr.startswith('test_'): value = getattr(test, attr) if callable(value): if six.PY3: value._run_with = drivers else: value.__func__._run_with = drivers else: test._run_with = drivers return test return decorator aodh-2.0.6/aodh/tests/functional/api/0000775000567000056710000000000013076064720020570 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/functional/api/test_versions.py0000664000567000056710000000307313076064371024056 0ustar jenkinsjenkins00000000000000# Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from aodh.tests.functional import api V2_MEDIA_TYPES = [ { 'base': 'application/json', 'type': 'application/vnd.openstack.telemetry-v2+json' }, { 'base': 'application/xml', 'type': 'application/vnd.openstack.telemetry-v2+xml' } ] V2_HTML_DESCRIPTION = { 'href': 'http://docs.openstack.org/', 'rel': 'describedby', 'type': 'text/html', } V2_EXPECTED_RESPONSE = { 'id': 'v2', 'links': [ { 'rel': 'self', 'href': 'http://localhost/v2', }, V2_HTML_DESCRIPTION ], 'media-types': V2_MEDIA_TYPES, 'status': 'stable', 'updated': '2013-02-13T00:00:00Z', } V2_VERSION_RESPONSE = { "version": V2_EXPECTED_RESPONSE } VERSIONS_RESPONSE = { "versions": { "values": [ V2_EXPECTED_RESPONSE ] } } class TestVersions(api.FunctionalTest): def test_versions(self): data = self.get_json('/') self.assertEqual(VERSIONS_RESPONSE, data) aodh-2.0.6/aodh/tests/functional/api/test_app.py0000664000567000056710000000471013076064372022766 0ustar jenkinsjenkins00000000000000# Copyright 2014 IBM Corp. All Rights Reserved. # Copyright 2015 Red Hat, Inc. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_config import cfg from oslo_config import fixture as fixture_config from aodh.api import app from aodh import service from aodh.tests import base class TestApp(base.BaseTestCase): def setUp(self): super(TestApp, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf def test_api_paste_file_not_exist(self): self.CONF.set_override('paste_config', 'non-existent-file', "api") with mock.patch.object(self.CONF, 'find_file') as ff: ff.return_value = None self.assertRaises(cfg.ConfigFilesNotFoundError, app.load_app, self.CONF) @mock.patch('aodh.storage.get_connection_from_config', mock.MagicMock()) @mock.patch('pecan.make_app') def test_pecan_debug(self, mocked): def _check_pecan_debug(g_debug, p_debug, expected, workers=1): self.CONF.set_override('debug', g_debug) if p_debug is not None: self.CONF.set_override('pecan_debug', p_debug, group='api') self.CONF.set_override('workers', workers, 'api') app.setup_app(conf=self.CONF) args, kwargs = mocked.call_args self.assertEqual(expected, kwargs.get('debug')) _check_pecan_debug(g_debug=False, p_debug=None, expected=False) _check_pecan_debug(g_debug=True, p_debug=None, expected=False) _check_pecan_debug(g_debug=True, p_debug=False, expected=False) _check_pecan_debug(g_debug=False, p_debug=True, expected=True) _check_pecan_debug(g_debug=True, p_debug=None, expected=False, workers=5) _check_pecan_debug(g_debug=False, p_debug=True, expected=False, workers=5) aodh-2.0.6/aodh/tests/functional/api/__init__.py0000664000567000056710000001606513076064372022714 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base classes for API tests. """ import os from oslo_config import fixture as fixture_config import pecan import pecan.testing from aodh import service from aodh.tests.functional import db as db_test_base class FunctionalTest(db_test_base.TestBase): """Used for functional tests of Pecan controllers. Used in case when you need to test your literal application and its integration with the framework. 
""" PATH_PREFIX = '' def setUp(self): super(FunctionalTest, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf self.setup_messaging(self.CONF) self.CONF.set_override('policy_file', os.path.abspath('etc/aodh/policy.json'), group='oslo_policy', enforce_type=True) self.app = self._make_app() def _make_app(self): self.config = { 'app': { 'root': 'aodh.api.controllers.root.RootController', 'modules': ['aodh.api'], }, 'wsme': { 'debug': True, }, } return pecan.testing.load_test_app(self.config, conf=self.CONF) def tearDown(self): super(FunctionalTest, self).tearDown() pecan.set_config({}, overwrite=True) def put_json(self, path, params, expect_errors=False, headers=None, extra_environ=None, status=None): """Sends simulated HTTP PUT request to Pecan test app. :param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: boolean value whether an error is expected based on request :param headers: A dictionary of headers to send along with the request :param extra_environ: A dictionary of environ variables to send along with the request :param status: Expected status code of response """ return self.post_json(path=path, params=params, expect_errors=expect_errors, headers=headers, extra_environ=extra_environ, status=status, method="put") def post_json(self, path, params, expect_errors=False, headers=None, method="post", extra_environ=None, status=None): """Sends simulated HTTP POST request to Pecan test app. :param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: boolean value whether an error is expected based on request :param headers: A dictionary of headers to send along with the request :param method: Request method type. Appropriate method function call should be used rather than passing attribute in. :param extra_environ: A dictionary of environ variables to send along with the request :param status: Expected status code of response """ full_path = self.PATH_PREFIX + path response = getattr(self.app, "%s_json" % method)( str(full_path), params=params, headers=headers, status=status, extra_environ=extra_environ, expect_errors=expect_errors ) return response def delete(self, path, expect_errors=False, headers=None, extra_environ=None, status=None): """Sends simulated HTTP DELETE request to Pecan test app. :param path: url path of target service :param expect_errors: boolean value whether an error is expected based on request :param headers: A dictionary of headers to send along with the request :param extra_environ: A dictionary of environ variables to send along with the request :param status: Expected status code of response """ full_path = self.PATH_PREFIX + path response = self.app.delete(str(full_path), headers=headers, status=status, extra_environ=extra_environ, expect_errors=expect_errors) return response def get_json(self, path, expect_errors=False, headers=None, extra_environ=None, q=None, groupby=None, status=None, override_params=None, **params): """Sends simulated HTTP GET request to Pecan test app. 
:param path: url path of target service :param expect_errors: boolean value whether an error is expected based on request :param headers: A dictionary of headers to send along with the request :param extra_environ: A dictionary of environ variables to send along with the request :param q: list of queries consisting of: field, value, op, and type keys :param groupby: list of fields to group by :param status: Expected status code of response :param override_params: literally encoded query param string :param params: content for wsgi.input of request """ q = q or [] groupby = groupby or [] full_path = self.PATH_PREFIX + path if override_params: all_params = override_params else: query_params = {'q.field': [], 'q.value': [], 'q.op': [], 'q.type': [], } for query in q: for name in ['field', 'op', 'value', 'type']: query_params['q.%s' % name].append(query.get(name, '')) all_params = {} all_params.update(params) if q: all_params.update(query_params) if groupby: all_params.update({'groupby': groupby}) response = self.app.get(full_path, params=all_params, headers=headers, extra_environ=extra_environ, expect_errors=expect_errors, status=status) if not expect_errors: response = response.json return response aodh-2.0.6/aodh/tests/functional/api/v2/0000775000567000056710000000000013076064720021117 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/functional/api/v2/test_alarm_scenarios.py0000664000567000056710000040643313076064372025707 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
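#
# The scenarios below drive the v2 /alarms REST API end to end through the
# in-process Pecan application built by
# aodh.tests.functional.api.FunctionalTest, against whichever storage backend
# the environment configures.  A minimal sketch of the request helpers used
# throughout (values are illustrative only):
#
#     alarms = self.get_json('/alarms', headers=self.auth_headers,
#                            q=[{'field': 'name', 'op': 'eq',
#                                'value': 'name1'}])
#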
"""Tests alarm operation.""" import datetime import os import uuid from gnocchiclient import exceptions import mock import oslo_messaging.conffixture from oslo_serialization import jsonutils import six from six import moves from aodh import messaging from aodh.storage import models from aodh.tests import constants from aodh.tests.functional.api import v2 def default_alarms(auth_headers): return [models.Alarm(name='name1', type='threshold', enabled=True, alarm_id='a', description='a', state='insufficient data', severity='critical', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=True, user_id=auth_headers['X-User-Id'], project_id=auth_headers['X-Project-Id'], time_constraints=[dict(name='testcons', start='0 11 * * *', duration=300)], rule=dict(comparison_operator='gt', threshold=2.0, statistic='avg', evaluation_periods=60, period=1, meter_name='meter.test', query=[{'field': 'project_id', 'op': 'eq', 'value': auth_headers['X-Project-Id']} ]), ), models.Alarm(name='name2', type='threshold', enabled=True, alarm_id='b', description='b', state='insufficient data', severity='critical', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=False, user_id=auth_headers['X-User-Id'], project_id=auth_headers['X-Project-Id'], time_constraints=[], rule=dict(comparison_operator='gt', threshold=4.0, statistic='avg', evaluation_periods=60, period=1, meter_name='meter.test', query=[{'field': 'project_id', 'op': 'eq', 'value': auth_headers['X-Project-Id']} ]), ), models.Alarm(name='name3', type='threshold', enabled=True, alarm_id='c', description='c', state='insufficient data', severity='moderate', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=False, user_id=auth_headers['X-User-Id'], project_id=auth_headers['X-Project-Id'], time_constraints=[], rule=dict(comparison_operator='gt', threshold=3.0, statistic='avg', evaluation_periods=60, period=1, meter_name='meter.mine', query=[{'field': 'project_id', 'op': 'eq', 'value': auth_headers['X-Project-Id']} ]), ), models.Alarm(name='name4', type='combination', enabled=True, alarm_id='d', description='d', state='insufficient data', severity='low', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=False, user_id=auth_headers['X-User-Id'], project_id=auth_headers['X-Project-Id'], time_constraints=[], rule=dict(alarm_ids=['a', 'b'], operator='or'), ), ] class LegacyPolicyFileMixin(object): def setUp(self): super(LegacyPolicyFileMixin, self).setUp() self.CONF.set_override( 'policy_file', os.path.abspath('aodh/tests/policy.json-pre-mikita'), group='oslo_policy') self.app = self._make_app() class TestAlarmsBase(v2.FunctionalTest): def setUp(self): super(TestAlarmsBase, self).setUp() self.auth_headers = {'X-User-Id': str(uuid.uuid4()), 'X-Project-Id': str(uuid.uuid4())} @staticmethod def _add_default_threshold_rule(alarm): if (alarm['type'] == 'threshold' and 'exclude_outliers' not in alarm['threshold_rule']): alarm['threshold_rule']['exclude_outliers'] = False def _verify_alarm(self, json, alarm, expected_name=None): if expected_name and alarm.name != expected_name: self.fail("Alarm not found") self._add_default_threshold_rule(json) for key in json: if 
key.endswith('_rule'): storage_key = 'rule' else: storage_key = key self.assertEqual(json[key], getattr(alarm, storage_key)) def _get_alarm(self, id, auth_headers=None): data = self.get_json('/alarms', headers=auth_headers or self.auth_headers) match = [a for a in data if a['alarm_id'] == id] self.assertEqual(1, len(match), 'alarm %s not found' % id) return match[0] def _update_alarm(self, id, updated_data, auth_headers=None): data = self._get_alarm(id, auth_headers) data.update(updated_data) self.put_json('/alarms/%s' % id, params=data, headers=auth_headers or self.auth_headers) def _delete_alarm(self, alarm, auth_headers=None): self.delete('/alarms/%s' % alarm['alarm_id'], headers=auth_headers or self.auth_headers, status=204) class TestListEmptyAlarms(TestAlarmsBase): def test_empty(self): data = self.get_json('/alarms', headers=self.auth_headers) self.assertEqual([], data) class TestAlarms(TestAlarmsBase): def setUp(self): super(TestAlarms, self).setUp() for alarm in default_alarms(self.auth_headers): self.alarm_conn.update_alarm(alarm) def test_list_alarms(self): data = self.get_json('/alarms', headers=self.auth_headers) self.assertEqual(4, len(data)) self.assertEqual(set(['name1', 'name2', 'name3', 'name4']), set(r['name'] for r in data)) self.assertEqual(set(['meter.test', 'meter.mine']), set(r['threshold_rule']['meter_name'] for r in data if 'threshold_rule' in r)) self.assertEqual(set(['or']), set(r['combination_rule']['operator'] for r in data if 'combination_rule' in r)) def test_alarms_query_with_timestamp(self): date_time = datetime.datetime(2012, 7, 2, 10, 41) isotime = date_time.isoformat() resp = self.get_json('/alarms', headers=self.auth_headers, q=[{'field': 'timestamp', 'op': 'gt', 'value': isotime}], expect_errors=True) self.assertEqual(resp.status_code, 400) self.assertEqual(jsonutils.loads(resp.body)['error_message'] ['faultstring'], 'Unknown argument: "timestamp": ' 'not valid for this resource') def test_alarms_query_with_meter(self): resp = self.get_json('/alarms', headers=self.auth_headers, q=[{'field': 'meter', 'op': 'eq', 'value': 'meter.mine'}], ) self.assertEqual(1, len(resp)) self.assertEqual('c', resp[0]['alarm_id']) self.assertEqual('meter.mine', resp[0] ['threshold_rule'] ['meter_name']) def test_alarms_query_with_state(self): alarm = models.Alarm(name='disabled', type='combination', enabled=False, alarm_id='d', description='d', state='ok', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=False, user_id=self.auth_headers['X-User-Id'], project_id=self.auth_headers['X-Project-Id'], time_constraints=[], rule=dict(alarm_ids=['a', 'b'], operator='or'), severity='critical') self.alarm_conn.update_alarm(alarm) resp = self.get_json('/alarms', headers=self.auth_headers, q=[{'field': 'state', 'op': 'eq', 'value': 'ok'}], ) self.assertEqual(1, len(resp)) self.assertEqual('ok', resp[0]['state']) def test_list_alarms_by_type(self): alarms = self.get_json('/alarms', headers=self.auth_headers, q=[{'field': 'type', 'op': 'eq', 'value': 'threshold'}]) self.assertEqual(3, len(alarms)) self.assertEqual(set(['threshold']), set(alarm['type'] for alarm in alarms)) def test_get_not_existing_alarm(self): resp = self.get_json('/alarms/alarm-id-3', headers=self.auth_headers, expect_errors=True) self.assertEqual(404, resp.status_code) self.assertEqual('Alarm alarm-id-3 not found in project %s' % self.auth_headers["X-Project-Id"], jsonutils.loads(resp.body)['error_message'] 
['faultstring']) def test_get_alarm(self): alarms = self.get_json('/alarms', headers=self.auth_headers, q=[{'field': 'name', 'value': 'name1', }]) self.assertEqual('name1', alarms[0]['name']) self.assertEqual('meter.test', alarms[0]['threshold_rule']['meter_name']) one = self.get_json('/alarms/%s' % alarms[0]['alarm_id'], headers=self.auth_headers) self.assertEqual('name1', one['name']) self.assertEqual('meter.test', one['threshold_rule']['meter_name']) self.assertEqual(alarms[0]['alarm_id'], one['alarm_id']) self.assertEqual(alarms[0]['repeat_actions'], one['repeat_actions']) self.assertEqual(alarms[0]['time_constraints'], one['time_constraints']) def test_get_alarm_disabled(self): alarm = models.Alarm(name='disabled', type='combination', enabled=False, alarm_id='d', description='d', state='insufficient data', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=False, user_id=self.auth_headers['X-User-Id'], project_id=self.auth_headers['X-Project-Id'], time_constraints=[], rule=dict(alarm_ids=['a', 'b'], operator='or'), severity='critical') self.alarm_conn.update_alarm(alarm) alarms = self.get_json('/alarms', headers=self.auth_headers, q=[{'field': 'enabled', 'value': 'False'}]) self.assertEqual(1, len(alarms)) self.assertEqual('disabled', alarms[0]['name']) one = self.get_json('/alarms/%s' % alarms[0]['alarm_id'], headers=self.auth_headers) self.assertEqual('disabled', one['name']) def test_get_alarm_project_filter_wrong_op_normal_user(self): project = self.auth_headers['X-Project-Id'] def _test(field, op): response = self.get_json('/alarms', q=[{'field': field, 'op': op, 'value': project}], expect_errors=True, status=400, headers=self.auth_headers) faultstring = ('Invalid input for field/attribute op. ' 'Value: \'%(op)s\'. 
unimplemented operator ' 'for %(field)s' % {'field': field, 'op': op}) self.assertEqual(faultstring, response.json['error_message']['faultstring']) _test('project', 'ne') _test('project_id', 'ne') def test_get_alarm_project_filter_normal_user(self): project = self.auth_headers['X-Project-Id'] def _test(field): alarms = self.get_json('/alarms', headers=self.auth_headers, q=[{'field': field, 'op': 'eq', 'value': project}]) self.assertEqual(4, len(alarms)) _test('project') _test('project_id') def test_get_alarm_other_project_normal_user(self): def _test(field): response = self.get_json('/alarms', q=[{'field': field, 'op': 'eq', 'value': 'other-project'}], expect_errors=True, status=401, headers=self.auth_headers) faultstring = 'Not Authorized to access project other-project' self.assertEqual(faultstring, response.json['error_message']['faultstring']) _test('project') _test('project_id') def test_get_alarm_forbiden(self): pf = os.path.abspath('aodh/tests/functional/api/v2/policy.json-test') self.CONF.set_override('policy_file', pf, group='oslo_policy', enforce_type=True) self.app = self._make_app() response = self.get_json('/alarms', expect_errors=True, status=403, headers=self.auth_headers) faultstring = 'RBAC Authorization Failed' self.assertEqual(403, response.status_code) self.assertEqual(faultstring, response.json['error_message']['faultstring']) def test_post_alarm_wsme_workaround(self): jsons = { 'type': { 'name': 'missing type', 'threshold_rule': { 'meter_name': 'ameter', 'threshold': 2.0, } }, 'name': { 'type': 'threshold', 'threshold_rule': { 'meter_name': 'ameter', 'threshold': 2.0, } }, 'threshold_rule/meter_name': { 'name': 'missing meter_name', 'type': 'threshold', 'threshold_rule': { 'threshold': 2.0, } }, 'threshold_rule/threshold': { 'name': 'missing threshold', 'type': 'threshold', 'threshold_rule': { 'meter_name': 'ameter', } }, 'combination_rule/alarm_ids': { 'name': 'missing alarm_ids', 'type': 'combination', 'combination_rule': {} } } for field, json in six.iteritems(jsons): resp = self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) self.assertEqual("Invalid input for field/attribute %s." " Value: \'None\'. Mandatory field missing." 
% field.split('/', 1)[-1], resp.json['error_message']['faultstring']) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(4, len(alarms)) def test_post_invalid_alarm_time_constraint_start(self): json = { 'name': 'added_alarm_invalid_constraint_duration', 'type': 'threshold', 'time_constraints': [ { 'name': 'testcons', 'start': '11:00am', 'duration': 10 } ], 'threshold_rule': { 'meter_name': 'ameter', 'threshold': 300.0 } } self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(4, len(alarms)) def test_post_duplicate_time_constraint_name(self): json = { 'name': 'added_alarm_duplicate_constraint_name', 'type': 'threshold', 'time_constraints': [ { 'name': 'testcons', 'start': '* 11 * * *', 'duration': 10 }, { 'name': 'testcons', 'start': '* * * * *', 'duration': 20 } ], 'threshold_rule': { 'meter_name': 'ameter', 'threshold': 300.0 } } resp = self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) self.assertEqual( "Time constraint names must be unique for a given alarm.", resp.json['error_message']['faultstring']) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(4, len(alarms)) def test_post_alarm_null_time_constraint(self): json = { 'name': 'added_alarm_invalid_constraint_duration', 'type': 'threshold', 'time_constraints': None, 'threshold_rule': { 'meter_name': 'ameter', 'threshold': 300.0 } } self.post_json('/alarms', params=json, status=201, headers=self.auth_headers) def test_post_invalid_alarm_time_constraint_duration(self): json = { 'name': 'added_alarm_invalid_constraint_duration', 'type': 'threshold', 'time_constraints': [ { 'name': 'testcons', 'start': '* 11 * * *', 'duration': -1, } ], 'threshold_rule': { 'meter_name': 'ameter', 'threshold': 300.0 } } self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(4, len(alarms)) def test_post_invalid_alarm_time_constraint_timezone(self): json = { 'name': 'added_alarm_invalid_constraint_timezone', 'type': 'threshold', 'time_constraints': [ { 'name': 'testcons', 'start': '* 11 * * *', 'duration': 10, 'timezone': 'aaaa' } ], 'threshold_rule': { 'meter_name': 'ameter', 'threshold': 300.0 } } self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(4, len(alarms)) def test_post_invalid_alarm_period(self): json = { 'name': 'added_alarm_invalid_period', 'type': 'threshold', 'threshold_rule': { 'meter_name': 'ameter', 'comparison_operator': 'gt', 'threshold': 2.0, 'statistic': 'avg', 'period': -1, } } self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(4, len(alarms)) def test_post_null_rule(self): json = { 'name': 'added_alarm_invalid_threshold_rule', 'type': 'threshold', 'threshold_rule': None, 'combination_rule': None, } resp = self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) self.assertEqual( "threshold_rule must be set for threshold type alarm", resp.json['error_message']['faultstring']) def test_post_invalid_alarm_input_state(self): json = { 'name': 'alarm1', 'state': 'bad_state', 'type': 'threshold', 'threshold_rule': { 'meter_name': 'ameter', 'comparison_operator': 'gt', 'threshold': 50.0 } } resp = 
self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) expected_err_msg = ("Invalid input for field/attribute state." " Value: 'bad_state'.") self.assertIn(expected_err_msg, resp.json['error_message']['faultstring']) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(4, len(alarms)) def test_post_invalid_alarm_input_severity(self): json = { 'name': 'alarm1', 'state': 'ok', 'severity': 'bad_value', 'type': 'threshold', 'threshold_rule': { 'meter_name': 'ameter', 'comparison_operator': 'gt', 'threshold': 50.0 } } resp = self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) expected_err_msg = ("Invalid input for field/attribute severity." " Value: 'bad_value'.") self.assertIn(expected_err_msg, resp.json['error_message']['faultstring']) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(4, len(alarms)) def test_post_invalid_alarm_input_type(self): json = { 'name': 'alarm3', 'state': 'ok', 'type': 'bad_type', 'threshold_rule': { 'meter_name': 'ameter', 'comparison_operator': 'gt', 'threshold': 50.0 } } resp = self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) expected_err_msg = ("Invalid input for field/attribute" " type." " Value: 'bad_type'.") self.assertIn(expected_err_msg, resp.json['error_message']['faultstring']) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(4, len(alarms)) def test_post_invalid_alarm_input_enabled_str(self): json = { 'name': 'alarm5', 'enabled': 'bad_enabled', 'state': 'ok', 'type': 'threshold', 'threshold_rule': { 'meter_name': 'ameter', 'comparison_operator': 'gt', 'threshold': 50.0 } } resp = self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) expected_err_msg = "Value not an unambiguous boolean: bad_enabled" self.assertIn(expected_err_msg, resp.json['error_message']['faultstring']) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(4, len(alarms)) def test_post_invalid_alarm_input_enabled_int(self): json = { 'name': 'alarm6', 'enabled': 0, 'state': 'ok', 'type': 'threshold', 'threshold_rule': { 'meter_name': 'ameter', 'comparison_operator': 'gt', 'threshold': 50.0 } } resp = self.post_json('/alarms', params=json, headers=self.auth_headers) self.assertFalse(resp.json['enabled']) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(5, len(alarms)) def test_post_invalid_alarm_have_multiple_rules(self): json = { 'name': 'added_alarm', 'type': 'threshold', 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'field': 'meter', 'value': 'ameter'}], 'comparison_operator': 'gt', 'threshold': 2.0, }, 'combination_rule': { 'alarm_ids': ['a', 'b'], } } resp = self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(4, len(alarms)) # threshold_rule and combination_rule order is not # predictable so it is not possible to do an exact match # here error_faultstring = resp.json['error_message']['faultstring'] for expected_string in ['threshold_rule', 'combination_rule', 'cannot be set at the same time']: self.assertIn(expected_string, error_faultstring) def _do_post_alarm_invalid_action(self, ok_actions=None, alarm_actions=None, insufficient_data_actions=None, error_message=None): ok_actions = ok_actions or [] alarm_actions = alarm_actions or [] insufficient_data_actions = insufficient_data_actions or [] json = { 'enabled': False, 'name': 'added_alarm', 
'state': 'ok', 'type': 'threshold', 'ok_actions': ok_actions, 'alarm_actions': alarm_actions, 'insufficient_data_actions': insufficient_data_actions, 'repeat_actions': True, 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'field': 'metadata.field', 'op': 'eq', 'value': '5', 'type': 'string'}], 'comparison_operator': 'le', 'statistic': 'count', 'threshold': 50, 'evaluation_periods': '3', 'period': '180', } } resp = self.post_json('/alarms', params=json, status=400, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(4, len(alarms)) self.assertEqual(error_message, resp.json['error_message']['faultstring']) def test_post_invalid_alarm_ok_actions(self): self._do_post_alarm_invalid_action( ok_actions=['spam://something/ok'], error_message='Unsupported action spam://something/ok') def test_post_invalid_alarm_alarm_actions(self): self._do_post_alarm_invalid_action( alarm_actions=['spam://something/alarm'], error_message='Unsupported action spam://something/alarm') def test_post_invalid_alarm_insufficient_data_actions(self): self._do_post_alarm_invalid_action( insufficient_data_actions=['spam://something/insufficient'], error_message='Unsupported action spam://something/insufficient') @staticmethod def _fake_urlsplit(*args, **kwargs): raise Exception("Evil urlsplit!") def test_post_invalid_alarm_actions_format(self): with mock.patch('oslo_utils.netutils.urlsplit', self._fake_urlsplit): self._do_post_alarm_invalid_action( alarm_actions=['http://[::1'], error_message='Unable to parse action http://[::1') def test_post_alarm_defaults(self): to_check = { 'enabled': True, 'name': 'added_alarm_defaults', 'ok_actions': [], 'alarm_actions': [], 'insufficient_data_actions': [], 'repeat_actions': False, } json = { 'name': 'added_alarm_defaults', 'type': 'threshold', 'threshold_rule': { 'meter_name': 'ameter', 'threshold': 300.0 } } self.post_json('/alarms', params=json, status=201, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(5, len(alarms)) for alarm in alarms: if alarm.name == 'added_alarm_defaults': for key in to_check: self.assertEqual(to_check[key], getattr(alarm, key)) break else: self.fail("Alarm not found") def test_post_conflict(self): json = { 'enabled': False, 'name': 'added_alarm', 'state': 'ok', 'type': 'threshold', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'field': 'metadata.field', 'op': 'eq', 'value': '5', 'type': 'string'}], 'comparison_operator': 'le', 'statistic': 'count', 'threshold': 50, 'evaluation_periods': '3', 'period': '180', } } self.post_json('/alarms', params=json, status=201, headers=self.auth_headers) self.post_json('/alarms', params=json, status=409, headers=self.auth_headers) def _do_test_post_alarm(self, exclude_outliers=None): json = { 'enabled': False, 'name': 'added_alarm', 'state': 'ok', 'type': 'threshold', 'severity': 'low', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'field': 'metadata.field', 'op': 'eq', 'value': '5', 'type': 'string'}], 'comparison_operator': 'le', 'statistic': 'count', 'threshold': 50, 'evaluation_periods': '3', 'period': '180', } } if exclude_outliers is not None: json['threshold_rule']['exclude_outliers'] = 
exclude_outliers self.post_json('/alarms', params=json, status=201, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms(enabled=False)) self.assertEqual(1, len(alarms)) json['threshold_rule']['query'].append({ 'field': 'project_id', 'op': 'eq', 'value': self.auth_headers['X-Project-Id']}) # to check to IntegerType type conversion json['threshold_rule']['evaluation_periods'] = 3 json['threshold_rule']['period'] = 180 self._verify_alarm(json, alarms[0], 'added_alarm') def test_post_alarm_outlier_exclusion_set(self): self._do_test_post_alarm(True) def test_post_alarm_outlier_exclusion_clear(self): self._do_test_post_alarm(False) def test_post_alarm_outlier_exclusion_defaulted(self): self._do_test_post_alarm() def test_post_alarm_noauth(self): json = { 'enabled': False, 'name': 'added_alarm', 'state': 'ok', 'type': 'threshold', 'severity': 'low', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'field': 'metadata.field', 'op': 'eq', 'value': '5', 'type': 'string'}], 'comparison_operator': 'le', 'statistic': 'count', 'threshold': 50, 'evaluation_periods': '3', 'exclude_outliers': False, 'period': '180', } } self.post_json('/alarms', params=json, status=201) alarms = list(self.alarm_conn.get_alarms(enabled=False)) self.assertEqual(1, len(alarms)) # to check to BoundedInt type conversion json['threshold_rule']['evaluation_periods'] = 3 json['threshold_rule']['period'] = 180 if alarms[0].name == 'added_alarm': for key in json: if key.endswith('_rule'): storage_key = 'rule' else: storage_key = key self.assertEqual(getattr(alarms[0], storage_key), json[key]) else: self.fail("Alarm not found") def _do_test_post_alarm_as_admin(self, explicit_project_constraint): """Test the creation of an alarm as admin for another project.""" json = { 'enabled': False, 'name': 'added_alarm', 'state': 'ok', 'type': 'threshold', 'user_id': 'auseridthatisnotmine', 'project_id': 'aprojectidthatisnotmine', 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'field': 'metadata.field', 'op': 'eq', 'value': '5', 'type': 'string'}], 'comparison_operator': 'le', 'statistic': 'count', 'threshold': 50, 'evaluation_periods': 3, 'period': 180, } } if explicit_project_constraint: project_constraint = {'field': 'project_id', 'op': 'eq', 'value': 'aprojectidthatisnotmine'} json['threshold_rule']['query'].append(project_constraint) headers = {} headers.update(self.auth_headers) headers['X-Roles'] = 'admin' self.post_json('/alarms', params=json, status=201, headers=headers) alarms = list(self.alarm_conn.get_alarms(enabled=False)) self.assertEqual(1, len(alarms)) self.assertEqual('auseridthatisnotmine', alarms[0].user_id) self.assertEqual('aprojectidthatisnotmine', alarms[0].project_id) self._add_default_threshold_rule(json) if alarms[0].name == 'added_alarm': for key in json: if key.endswith('_rule'): storage_key = 'rule' if explicit_project_constraint: self.assertEqual(json[key], getattr(alarms[0], storage_key)) else: query = getattr(alarms[0], storage_key).get('query') self.assertEqual(2, len(query)) implicit_constraint = { u'field': u'project_id', u'value': u'aprojectidthatisnotmine', u'op': u'eq' } self.assertEqual(implicit_constraint, query[1]) else: self.assertEqual(json[key], getattr(alarms[0], key)) else: self.fail("Alarm not found") def test_post_alarm_as_admin_explicit_project_constraint(self): """Test the creation of an alarm as admin for 
another project. With an explicit query constraint on the owner's project ID. """ self._do_test_post_alarm_as_admin(True) def test_post_alarm_as_admin_implicit_project_constraint(self): """Test the creation of an alarm as admin for another project. Test without an explicit query constraint on the owner's project ID. """ self._do_test_post_alarm_as_admin(False) def test_post_alarm_as_admin_no_user(self): """Test the creation of an alarm. Test the creation of an alarm as admin for another project but forgetting to set the values. """ json = { 'enabled': False, 'name': 'added_alarm', 'state': 'ok', 'type': 'threshold', 'project_id': 'aprojectidthatisnotmine', 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'field': 'metadata.field', 'op': 'eq', 'value': '5', 'type': 'string'}, {'field': 'project_id', 'op': 'eq', 'value': 'aprojectidthatisnotmine'}], 'comparison_operator': 'le', 'statistic': 'count', 'threshold': 50, 'evaluation_periods': 3, 'period': 180, } } headers = {} headers.update(self.auth_headers) headers['X-Roles'] = 'admin' self.post_json('/alarms', params=json, status=201, headers=headers) alarms = list(self.alarm_conn.get_alarms(enabled=False)) self.assertEqual(1, len(alarms)) self.assertEqual(self.auth_headers['X-User-Id'], alarms[0].user_id) self.assertEqual('aprojectidthatisnotmine', alarms[0].project_id) self._verify_alarm(json, alarms[0], 'added_alarm') def test_post_alarm_as_admin_no_project(self): """Test the creation of an alarm. Test the creation of an alarm as admin for another project but forgetting to set the values. """ json = { 'enabled': False, 'name': 'added_alarm', 'state': 'ok', 'type': 'threshold', 'user_id': 'auseridthatisnotmine', 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'field': 'metadata.field', 'op': 'eq', 'value': '5', 'type': 'string'}, {'field': 'project_id', 'op': 'eq', 'value': 'aprojectidthatisnotmine'}], 'comparison_operator': 'le', 'statistic': 'count', 'threshold': 50, 'evaluation_periods': 3, 'period': 180, } } headers = {} headers.update(self.auth_headers) headers['X-Roles'] = 'admin' self.post_json('/alarms', params=json, status=201, headers=headers) alarms = list(self.alarm_conn.get_alarms(enabled=False)) self.assertEqual(1, len(alarms)) self.assertEqual('auseridthatisnotmine', alarms[0].user_id) self.assertEqual(self.auth_headers['X-Project-Id'], alarms[0].project_id) self._verify_alarm(json, alarms[0], 'added_alarm') @staticmethod def _alarm_representation_owned_by(identifiers): json = { 'name': 'added_alarm', 'enabled': False, 'type': 'threshold', 'ok_actions': ['http://something/ok'], 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'field': 'metadata.field', 'op': 'eq', 'value': '5', 'type': 'string'}], 'comparison_operator': 'le', 'statistic': 'count', 'threshold': 50, 'evaluation_periods': 3, 'period': 180, } } for aspect, id in six.iteritems(identifiers): json['%s_id' % aspect] = id return json def _do_test_post_alarm_as_nonadmin_on_behalf_of_another(self, identifiers): """Test posting an alarm. Test that posting an alarm as non-admin on behalf of another user/project fails with an explicit 401 instead of reverting to the requestor's identity. 
""" json = self._alarm_representation_owned_by(identifiers) headers = {} headers.update(self.auth_headers) headers['X-Roles'] = 'demo' resp = self.post_json('/alarms', params=json, status=401, headers=headers) aspect = 'user' if 'user' in identifiers else 'project' params = dict(aspect=aspect, id=identifiers[aspect]) self.assertEqual("Not Authorized to access %(aspect)s %(id)s" % params, jsonutils.loads(resp.body)['error_message'] ['faultstring']) def test_post_alarm_as_nonadmin_on_behalf_of_another_user(self): identifiers = dict(user='auseridthatisnotmine') self._do_test_post_alarm_as_nonadmin_on_behalf_of_another(identifiers) def test_post_alarm_as_nonadmin_on_behalf_of_another_project(self): identifiers = dict(project='aprojectidthatisnotmine') self._do_test_post_alarm_as_nonadmin_on_behalf_of_another(identifiers) def test_post_alarm_as_nonadmin_on_behalf_of_another_creds(self): identifiers = dict(user='auseridthatisnotmine', project='aprojectidthatisnotmine') self._do_test_post_alarm_as_nonadmin_on_behalf_of_another(identifiers) def _do_test_post_alarm_as_nonadmin_on_behalf_of_self(self, identifiers): """Test posting an alarm. Test posting an alarm as non-admin on behalf of own user/project creates alarm associated with the requestor's identity. """ json = self._alarm_representation_owned_by(identifiers) headers = {} headers.update(self.auth_headers) headers['X-Roles'] = 'demo' self.post_json('/alarms', params=json, status=201, headers=headers) alarms = list(self.alarm_conn.get_alarms(enabled=False)) self.assertEqual(1, len(alarms)) self.assertEqual(alarms[0].user_id, self.auth_headers['X-User-Id']) self.assertEqual(alarms[0].project_id, self.auth_headers['X-Project-Id']) def test_post_alarm_as_nonadmin_on_behalf_of_own_user(self): identifiers = dict(user=self.auth_headers['X-User-Id']) self._do_test_post_alarm_as_nonadmin_on_behalf_of_self(identifiers) def test_post_alarm_as_nonadmin_on_behalf_of_own_project(self): identifiers = dict(project=self.auth_headers['X-Project-Id']) self._do_test_post_alarm_as_nonadmin_on_behalf_of_self(identifiers) def test_post_alarm_as_nonadmin_on_behalf_of_own_creds(self): identifiers = dict(user=self.auth_headers['X-User-Id'], project=self.auth_headers['X-Project-Id']) self._do_test_post_alarm_as_nonadmin_on_behalf_of_self(identifiers) def test_post_alarm_with_mismatch_between_type_and_rule(self): """Test the creation of an combination alarm with threshold rule.""" json = { 'enabled': False, 'name': 'added_alarm', 'state': 'ok', 'type': 'combination', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'field': 'metadata.field', 'op': 'eq', 'value': '5', 'type': 'string'}], 'comparison_operator': 'le', 'statistic': 'count', 'threshold': 50, 'evaluation_periods': '3', 'period': '180', } } resp = self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) self.assertEqual( "combination_rule must be set for combination type alarm", resp.json['error_message']['faultstring']) def test_post_alarm_with_duplicate_actions(self): body = { 'name': 'dup-alarm-actions', 'type': 'combination', 'combination_rule': { 'alarm_ids': ['a', 'b'], }, 'alarm_actions': ['http://no.where', 'http://no.where'] } resp = self.post_json('/alarms', params=body, headers=self.auth_headers) self.assertEqual(201, resp.status_code) alarms = 
list(self.alarm_conn.get_alarms(name='dup-alarm-actions')) self.assertEqual(1, len(alarms)) self.assertEqual(['http://no.where'], alarms[0].alarm_actions) def test_post_alarm_with_too_many_actions(self): self.CONF.set_override('alarm_max_actions', 1) body = { 'name': 'alarm-with-many-actions', 'type': 'combination', 'combination_rule': { 'alarm_ids': ['a', 'b'], }, 'alarm_actions': ['http://no.where', 'http://no.where2'] } resp = self.post_json('/alarms', params=body, expect_errors=True, headers=self.auth_headers) self.assertEqual(400, resp.status_code) self.assertEqual("alarm_actions count exceeds maximum value 1", resp.json['error_message']['faultstring']) def test_post_alarm_normal_user_set_log_actions(self): body = { 'name': 'log_alarm_actions', 'type': 'combination', 'combination_rule': { 'alarm_ids': ['a', 'b'], }, 'alarm_actions': ['log://'] } resp = self.post_json('/alarms', params=body, expect_errors=True, headers=self.auth_headers) self.assertEqual(401, resp.status_code) expected_msg = ("You are not authorized to create action: log://") self.assertEqual(expected_msg, resp.json['error_message']['faultstring']) def test_post_alarm_normal_user_set_test_actions(self): body = { 'name': 'test_alarm_actions', 'type': 'combination', 'combination_rule': { 'alarm_ids': ['a', 'b'], }, 'alarm_actions': ['test://'] } resp = self.post_json('/alarms', params=body, expect_errors=True, headers=self.auth_headers) self.assertEqual(401, resp.status_code) expected_msg = ("You are not authorized to create action: test://") self.assertEqual(expected_msg, resp.json['error_message']['faultstring']) def test_post_alarm_admin_user_set_log_test_actions(self): body = { 'name': 'admin_alarm_actions', 'type': 'combination', 'combination_rule': { 'alarm_ids': ['a', 'b'], }, 'alarm_actions': ['test://', 'log://'] } headers = self.auth_headers headers['X-Roles'] = 'admin' self.post_json('/alarms', params=body, status=201, headers=headers) alarms = list(self.alarm_conn.get_alarms(name='admin_alarm_actions')) self.assertEqual(1, len(alarms)) self.assertEqual(['test://', 'log://'], alarms[0].alarm_actions) def test_post_alarm_without_actions(self): body = { 'name': 'alarm_actions_none', 'type': 'combination', 'combination_rule': { 'alarm_ids': ['a', 'b'], }, 'alarm_actions': None } headers = self.auth_headers headers['X-Roles'] = 'admin' self.post_json('/alarms', params=body, status=201, headers=headers) alarms = list(self.alarm_conn.get_alarms(name='alarm_actions_none')) self.assertEqual(1, len(alarms)) # FIXME(sileht): This should really returns [] not None # but the mongodb and sql just store the json dict as is... # migration script for sql will be a mess because we have # to parse all JSON :( # I guess we assume that wsme convert the None input to [] # because of the array type, but it won't... 
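        # Until that is addressed, the stored value really is None, which is
        # exactly what the assertion below checks.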
self.assertIsNone(alarms[0].alarm_actions) def test_post_alarm_trust(self): json = { 'name': 'added_alarm_defaults', 'type': 'threshold', 'ok_actions': ['trust+http://my.server:1234/foo'], 'threshold_rule': { 'meter_name': 'ameter', 'threshold': 300.0 } } auth = mock.Mock() trust_client = mock.Mock() with mock.patch('aodh.keystone_client.get_client') as client: mock_session = mock.Mock() mock_session.get_user_id.return_value = 'my_user' client.return_value = mock.Mock(session=mock_session) with mock.patch('keystoneclient.v3.client.Client') as sub_client: sub_client.return_value = trust_client trust_client.trusts.create.return_value = mock.Mock(id='5678') self.post_json('/alarms', params=json, status=201, headers=self.auth_headers, extra_environ={'keystone.token_auth': auth}) trust_client.trusts.create.assert_called_once_with( trustor_user=self.auth_headers['X-User-Id'], trustee_user='my_user', project=self.auth_headers['X-Project-Id'], impersonation=True, role_names=[]) alarms = list(self.alarm_conn.get_alarms()) for alarm in alarms: if alarm.name == 'added_alarm_defaults': self.assertEqual( ['trust+http://5678:delete@my.server:1234/foo'], alarm.ok_actions) break else: self.fail("Alarm not found") with mock.patch('aodh.keystone_client.get_client') as client: client.return_value = mock.Mock( auth_ref=mock.Mock(user_id='my_user')) with mock.patch('keystoneclient.v3.client.Client') as sub_client: sub_client.return_value = trust_client self.delete('/alarms/%s' % alarm.alarm_id, headers=self.auth_headers, status=204, extra_environ={'keystone.token_auth': auth}) trust_client.trusts.delete.assert_called_once_with('5678') def test_put_alarm(self): json = { 'enabled': False, 'name': 'name_put', 'state': 'ok', 'type': 'threshold', 'severity': 'critical', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'field': 'metadata.field', 'op': 'eq', 'value': '5', 'type': 'string'}], 'comparison_operator': 'le', 'statistic': 'count', 'threshold': 50, 'evaluation_periods': 3, 'period': 180, } } data = self.get_json('/alarms', headers=self.auth_headers, q=[{'field': 'name', 'value': 'name1', }]) self.assertEqual(1, len(data)) alarm_id = data[0]['alarm_id'] self.put_json('/alarms/%s' % alarm_id, params=json, headers=self.auth_headers) alarm = list(self.alarm_conn.get_alarms(alarm_id=alarm_id, enabled=False))[0] json['threshold_rule']['query'].append({ 'field': 'project_id', 'op': 'eq', 'value': self.auth_headers['X-Project-Id']}) self._verify_alarm(json, alarm) def test_put_alarm_as_admin(self): json = { 'user_id': 'myuserid', 'project_id': 'myprojectid', 'enabled': False, 'name': 'name_put', 'state': 'ok', 'type': 'threshold', 'severity': 'critical', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'field': 'metadata.field', 'op': 'eq', 'value': '5', 'type': 'string'}, {'field': 'project_id', 'op': 'eq', 'value': 'myprojectid'}], 'comparison_operator': 'le', 'statistic': 'count', 'threshold': 50, 'evaluation_periods': 3, 'period': 180, } } headers = {} headers.update(self.auth_headers) headers['X-Roles'] = 'admin' data = self.get_json('/alarms', headers=headers, q=[{'field': 'name', 'value': 'name1', }]) self.assertEqual(1, len(data)) alarm_id = data[0]['alarm_id'] 
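        # With the admin role, the PUT below may legitimately reassign
        # user_id/project_id to values that differ from the caller's
        # credentials; the assertions that follow verify both were honoured.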
self.put_json('/alarms/%s' % alarm_id, params=json, headers=headers) alarm = list(self.alarm_conn.get_alarms(alarm_id=alarm_id, enabled=False))[0] self.assertEqual('myuserid', alarm.user_id) self.assertEqual('myprojectid', alarm.project_id) self._verify_alarm(json, alarm) def test_put_alarm_wrong_field(self): json = { 'this_can_not_be_correct': 'ha', 'enabled': False, 'name': 'name1', 'state': 'ok', 'type': 'threshold', 'severity': 'critical', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'field': 'metadata.field', 'op': 'eq', 'value': '5', 'type': 'string'}], 'comparison_operator': 'le', 'statistic': 'count', 'threshold': 50, 'evaluation_periods': 3, 'period': 180, } } data = self.get_json('/alarms', headers=self.auth_headers, q=[{'field': 'name', 'value': 'name1', }]) self.assertEqual(1, len(data)) alarm_id = data[0]['alarm_id'] resp = self.put_json('/alarms/%s' % alarm_id, expect_errors=True, params=json, headers=self.auth_headers) self.assertEqual(400, resp.status_code) def test_put_alarm_with_existing_name(self): """Test that update a threshold alarm with an existing name.""" json = { 'enabled': False, 'name': 'name1', 'state': 'ok', 'type': 'threshold', 'severity': 'critical', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'field': 'metadata.field', 'op': 'eq', 'value': '5', 'type': 'string'}], 'comparison_operator': 'le', 'statistic': 'count', 'threshold': 50, 'evaluation_periods': 3, 'period': 180, } } data = self.get_json('/alarms', headers=self.auth_headers, q=[{'field': 'name', 'value': 'name2', }]) self.assertEqual(1, len(data)) alarm_id = data[0]['alarm_id'] resp = self.put_json('/alarms/%s' % alarm_id, expect_errors=True, status=409, params=json, headers=self.auth_headers) self.assertEqual( 'Alarm with name=name1 exists', resp.json['error_message']['faultstring']) def test_put_invalid_alarm_actions(self): json = { 'enabled': False, 'name': 'name1', 'state': 'ok', 'type': 'threshold', 'severity': 'critical', 'ok_actions': ['spam://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'field': 'metadata.field', 'op': 'eq', 'value': '5', 'type': 'string'}], 'comparison_operator': 'le', 'statistic': 'count', 'threshold': 50, 'evaluation_periods': 3, 'period': 180, } } data = self.get_json('/alarms', headers=self.auth_headers, q=[{'field': 'name', 'value': 'name2', }]) self.assertEqual(1, len(data)) alarm_id = data[0]['alarm_id'] resp = self.put_json('/alarms/%s' % alarm_id, expect_errors=True, status=400, params=json, headers=self.auth_headers) self.assertEqual( 'Unsupported action spam://something/ok', resp.json['error_message']['faultstring']) def test_put_alarm_trust(self): data = self._get_alarm('a') data.update({'ok_actions': ['trust+http://something/ok']}) trust_client = mock.Mock() with mock.patch('aodh.keystone_client.get_client') as client: client.return_value = mock.Mock( auth_ref=mock.Mock(user_id='my_user')) with mock.patch('keystoneclient.v3.client.Client') as sub_client: sub_client.return_value = trust_client trust_client.trusts.create.return_value = mock.Mock(id='5678') 
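        # Updating the alarm with a trust+http action should create a
        # Keystone trust (mocked here) and embed the returned trust id
        # in the stored action URL.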
self.put_json('/alarms/%s' % data['alarm_id'], params=data, headers=self.auth_headers) data = self._get_alarm('a') self.assertEqual( ['trust+http://5678:delete@something/ok'], data['ok_actions']) data.update({'ok_actions': ['http://no-trust-something/ok']}) with mock.patch('aodh.keystone_client.get_client') as client: client.return_value = mock.Mock( auth_ref=mock.Mock(user_id='my_user')) with mock.patch('keystoneclient.v3.client.Client') as sub_client: sub_client.return_value = trust_client self.put_json('/alarms/%s' % data['alarm_id'], params=data, headers=self.auth_headers) trust_client.trusts.delete.assert_called_once_with('5678') data = self._get_alarm('a') self.assertEqual( ['http://no-trust-something/ok'], data['ok_actions']) def test_delete_alarm(self): data = self.get_json('/alarms', headers=self.auth_headers) self.assertEqual(4, len(data)) resp = self.delete('/alarms/%s' % data[0]['alarm_id'], headers=self.auth_headers, status=204) self.assertEqual(b'', resp.body) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(3, len(alarms)) def test_get_state_alarm(self): data = self.get_json('/alarms', headers=self.auth_headers) self.assertEqual(4, len(data)) resp = self.get_json('/alarms/%s/state' % data[0]['alarm_id'], headers=self.auth_headers) self.assertEqual(resp, data[0]['state']) def test_set_state_alarm(self): data = self.get_json('/alarms', headers=self.auth_headers) self.assertEqual(4, len(data)) resp = self.put_json('/alarms/%s/state' % data[0]['alarm_id'], headers=self.auth_headers, params='alarm') alarms = list(self.alarm_conn.get_alarms(alarm_id=data[0]['alarm_id'])) self.assertEqual(1, len(alarms)) self.assertEqual('alarm', alarms[0].state) self.assertEqual('alarm', resp.json) def test_set_invalid_state_alarm(self): data = self.get_json('/alarms', headers=self.auth_headers) self.assertEqual(4, len(data)) self.put_json('/alarms/%s/state' % data[0]['alarm_id'], headers=self.auth_headers, params='not valid', status=400) def test_alarms_sends_notification(self): # Hit the AlarmsController ... 
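        # Post a new alarm and verify that an 'alarm.creation'
        # notification is published on the notifications topic.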
json = { 'name': 'sent_notification', 'type': 'threshold', 'severity': 'low', 'threshold_rule': { 'meter_name': 'ameter', 'comparison_operator': 'gt', 'threshold': 2.0, 'statistic': 'avg', } } endpoint = mock.MagicMock() target = oslo_messaging.Target(topic="notifications") listener = messaging.get_notification_listener( self.transport, [target], [endpoint]) listener.start() endpoint.info.side_effect = lambda *args: listener.stop() self.post_json('/alarms', params=json, headers=self.auth_headers) listener.wait() class PayloadMatcher(object): def __eq__(self, payload): return (payload['detail']['name'] == 'sent_notification' and payload['type'] == 'creation' and payload['detail']['rule']['meter_name'] == 'ameter' and set(['alarm_id', 'detail', 'event_id', 'on_behalf_of', 'project_id', 'timestamp', 'user_id']).issubset(payload.keys())) endpoint.info.assert_called_once_with( {}, 'aodh.api', 'alarm.creation', PayloadMatcher(), mock.ANY) def test_alarm_sends_notification(self): with mock.patch.object(messaging, 'get_notifier') as get_notifier: notifier = get_notifier.return_value self._update_alarm('a', dict(name='new_name')) get_notifier.assert_called_once_with(mock.ANY, publisher_id='aodh.api') calls = notifier.info.call_args_list self.assertEqual(1, len(calls)) args, _ = calls[0] context, event_type, payload = args self.assertEqual('alarm.rule_change', event_type) self.assertEqual('new_name', payload['detail']['name']) self.assertTrue(set(['alarm_id', 'detail', 'event_id', 'on_behalf_of', 'project_id', 'timestamp', 'type', 'user_id']).issubset(payload.keys())) class TestAlarmsLegacy(LegacyPolicyFileMixin, TestAlarms): pass class TestAlarmsHistory(TestAlarmsBase): def setUp(self): super(TestAlarmsHistory, self).setUp() alarm = models.Alarm( name='name1', type='threshold', enabled=True, alarm_id='a', description='a', state='insufficient data', severity='critical', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=True, user_id=self.auth_headers['X-User-Id'], project_id=self.auth_headers['X-Project-Id'], time_constraints=[dict(name='testcons', start='0 11 * * *', duration=300)], rule=dict(comparison_operator='gt', threshold=2.0, statistic='avg', evaluation_periods=60, period=1, meter_name='meter.test', query=[dict(field='project_id', op='eq', value=self.auth_headers['X-Project-Id']) ])) self.alarm_conn.update_alarm(alarm) def _get_alarm_history(self, alarm_id, auth_headers=None, query=None, expect_errors=False, status=200): url = '/alarms/%s/history' % alarm_id if query: url += '?q.op=%(op)s&q.value=%(value)s&q.field=%(field)s' % query resp = self.get_json(url, headers=auth_headers or self.auth_headers, expect_errors=expect_errors) if expect_errors: self.assertEqual(status, resp.status_code) return resp def _assert_is_subset(self, expected, actual): for k, v in six.iteritems(expected): self.assertEqual(v, actual.get(k), 'mismatched field: %s' % k) self.assertIsNotNone(actual['event_id']) def _assert_in_json(self, expected, actual): actual = jsonutils.dumps(jsonutils.loads(actual), sort_keys=True) for k, v in six.iteritems(expected): fragment = jsonutils.dumps({k: v}, sort_keys=True)[1:-1] self.assertIn(fragment, actual, '%s not in %s' % (fragment, actual)) def test_record_alarm_history_config(self): self.CONF.set_override('record_history', False) history = self._get_alarm_history('a') self.assertEqual([], history) self._update_alarm('a', dict(name='renamed')) history = 
self._get_alarm_history('a') self.assertEqual([], history) self.CONF.set_override('record_history', True) self._update_alarm('a', dict(name='foobar')) history = self._get_alarm_history('a') self.assertEqual(1, len(history)) def test_record_alarm_history_severity(self): alarm = self._get_alarm('a') history = self._get_alarm_history('a') self.assertEqual([], history) self.assertEqual('critical', alarm['severity']) self._update_alarm('a', dict(severity='low')) new_alarm = self._get_alarm('a') history = self._get_alarm_history('a') self.assertEqual(1, len(history)) self.assertEqual(jsonutils.dumps({'severity': 'low'}), history[0]['detail']) self.assertEqual('low', new_alarm['severity']) def test_record_alarm_history_statistic(self): alarm = self._get_alarm('a') history = self._get_alarm_history('a') self.assertEqual([], history) self.assertEqual('avg', alarm['threshold_rule']['statistic']) rule = alarm['threshold_rule'].copy() rule['statistic'] = 'min' data = dict(threshold_rule=rule) self._update_alarm('a', data) new_alarm = self._get_alarm('a') history = self._get_alarm_history('a') self.assertEqual(1, len(history)) self.assertEqual("min", jsonutils.loads(history[0]['detail']) ['rule']["statistic"]) self.assertEqual('min', new_alarm['threshold_rule']['statistic']) def test_redundant_update_alarm_property_no_history_change(self): alarm = self._get_alarm('a') history = self._get_alarm_history('a') self.assertEqual([], history) self.assertEqual('critical', alarm['severity']) self._update_alarm('a', dict(severity='low')) new_alarm = self._get_alarm('a') history = self._get_alarm_history('a') self.assertEqual(1, len(history)) self.assertEqual(jsonutils.dumps({'severity': 'low'}), history[0]['detail']) self.assertEqual('low', new_alarm['severity']) self._update_alarm('a', dict(severity='low')) updated_history = self._get_alarm_history('a') self.assertEqual(1, len(updated_history)) self.assertEqual(jsonutils.dumps({'severity': 'low'}), updated_history[0]['detail']) self.assertEqual(history, updated_history) def test_get_recorded_alarm_history_on_create(self): new_alarm = { 'name': 'new_alarm', 'type': 'threshold', 'threshold_rule': { 'meter_name': 'ameter', 'query': [], 'comparison_operator': 'le', 'statistic': 'max', 'threshold': 42.0, 'period': 60, 'evaluation_periods': 1, } } self.post_json('/alarms', params=new_alarm, status=201, headers=self.auth_headers) alarms = self.get_json('/alarms', headers=self.auth_headers, q=[{'field': 'name', 'value': 'new_alarm', }]) self.assertEqual(1, len(alarms)) alarm = alarms[0] history = self._get_alarm_history(alarm['alarm_id']) self.assertEqual(1, len(history)) self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], on_behalf_of=alarm['project_id'], project_id=alarm['project_id'], type='creation', user_id=alarm['user_id']), history[0]) self._add_default_threshold_rule(new_alarm) new_alarm['rule'] = new_alarm['threshold_rule'] del new_alarm['threshold_rule'] new_alarm['rule']['query'].append({ 'field': 'project_id', 'op': 'eq', 'value': self.auth_headers['X-Project-Id']}) self._assert_in_json(new_alarm, history[0]['detail']) def _do_test_get_recorded_alarm_history_on_update(self, data, type, detail, auth=None): alarm = self._get_alarm('a') history = self._get_alarm_history('a') self.assertEqual([], history) self._update_alarm('a', data, auth) history = self._get_alarm_history('a') self.assertEqual(1, len(history)) project_id = auth['X-Project-Id'] if auth else alarm['project_id'] user_id = auth['X-User-Id'] if auth else alarm['user_id'] 
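        # The history entry is attributed to the caller, while
        # on_behalf_of always records the alarm owner's project.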
self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], detail=detail, on_behalf_of=alarm['project_id'], project_id=project_id, type=type, user_id=user_id), history[0]) def test_get_recorded_alarm_history_rule_change(self): data = dict(name='renamed') detail = '{"name": "renamed"}' self._do_test_get_recorded_alarm_history_on_update(data, 'rule change', detail) def test_get_recorded_alarm_history_state_transition_on_behalf_of(self): # credentials for new non-admin user, on who's behalf the alarm # is created member_user = str(uuid.uuid4()) member_project = str(uuid.uuid4()) member_auth = {'X-Roles': 'member', 'X-User-Id': member_user, 'X-Project-Id': member_project} new_alarm = { 'name': 'new_alarm', 'type': 'threshold', 'state': 'ok', 'threshold_rule': { 'meter_name': 'other_meter', 'query': [{'field': 'project_id', 'op': 'eq', 'value': member_project}], 'comparison_operator': 'le', 'statistic': 'max', 'threshold': 42.0, 'evaluation_periods': 1, 'period': 60 } } self.post_json('/alarms', params=new_alarm, status=201, headers=member_auth) alarm = self.get_json('/alarms', headers=member_auth)[0] # effect a state transition as a new administrative user admin_user = str(uuid.uuid4()) admin_project = str(uuid.uuid4()) admin_auth = {'X-Roles': 'admin', 'X-User-Id': admin_user, 'X-Project-Id': admin_project} data = dict(state='alarm') self._update_alarm(alarm['alarm_id'], data, auth_headers=admin_auth) self._add_default_threshold_rule(new_alarm) new_alarm['rule'] = new_alarm['threshold_rule'] del new_alarm['threshold_rule'] # ensure that both the creation event and state transition # are visible to the non-admin alarm owner and admin user alike for auth in [member_auth, admin_auth]: history = self._get_alarm_history(alarm['alarm_id'], auth_headers=auth) self.assertEqual(2, len(history), 'hist: %s' % history) self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], detail='{"state": "alarm"}', on_behalf_of=alarm['project_id'], project_id=admin_project, type='rule change', user_id=admin_user), history[0]) self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], on_behalf_of=alarm['project_id'], project_id=member_project, type='creation', user_id=member_user), history[1]) self._assert_in_json(new_alarm, history[1]['detail']) # ensure on_behalf_of cannot be constrained in an API call query = dict(field='on_behalf_of', op='eq', value=alarm['project_id']) self._get_alarm_history(alarm['alarm_id'], auth_headers=auth, query=query, expect_errors=True, status=400) def test_get_recorded_alarm_history_segregation(self): data = dict(name='renamed') detail = '{"name": "renamed"}' self._do_test_get_recorded_alarm_history_on_update(data, 'rule change', detail) auth = {'X-Roles': 'member', 'X-User-Id': str(uuid.uuid4()), 'X-Project-Id': str(uuid.uuid4())} history = self._get_alarm_history('a', auth) self.assertEqual([], history) def test_delete_alarm_history_after_deletion(self): history = self._get_alarm_history('a') self.assertEqual([], history) self._update_alarm('a', dict(name='renamed')) history = self._get_alarm_history('a') self.assertEqual(1, len(history)) self.delete('/alarms/%s' % 'a', headers=self.auth_headers, status=204) history = self._get_alarm_history('a') self.assertEqual(0, len(history)) def test_get_alarm_history_ordered_by_recentness(self): for i in moves.xrange(10): self._update_alarm('a', dict(name='%s' % i)) history = self._get_alarm_history('a') self.assertEqual(10, len(history), 'hist: %s' % history) self._assert_is_subset(dict(alarm_id='a', type='rule change'), history[0]) for i in 
moves.xrange(1, 11): detail = '{"name": "%s"}' % (10 - i) self._assert_is_subset(dict(alarm_id='a', detail=detail, type='rule change'), history[i - 1]) def test_get_alarm_history_constrained_by_timestamp(self): alarm = self._get_alarm('a') self._update_alarm('a', dict(name='renamed')) after = datetime.datetime.utcnow().isoformat() query = dict(field='timestamp', op='gt', value=after) history = self._get_alarm_history('a', query=query) self.assertEqual(0, len(history)) query['op'] = 'le' history = self._get_alarm_history('a', query=query) self.assertEqual(1, len(history)) detail = '{"name": "renamed"}' self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], detail=detail, on_behalf_of=alarm['project_id'], project_id=alarm['project_id'], type='rule change', user_id=alarm['user_id']), history[0]) def test_get_alarm_history_constrained_by_type(self): alarm = self._get_alarm('a') self._update_alarm('a', dict(name='renamed2')) query = dict(field='type', op='eq', value='rule change') history = self._get_alarm_history('a', query=query) self.assertEqual(1, len(history)) detail = '{"name": "renamed2"}' self._assert_is_subset(dict(alarm_id=alarm['alarm_id'], detail=detail, on_behalf_of=alarm['project_id'], project_id=alarm['project_id'], type='rule change', user_id=alarm['user_id']), history[0]) def test_get_alarm_history_constrained_by_alarm_id_failed(self): query = dict(field='alarm_id', op='eq', value='a') resp = self._get_alarm_history('a', query=query, expect_errors=True, status=400) msg = ('Unknown argument: "alarm_id": unrecognized' " field in query: [], valid keys: ['project', " "'search_offset', 'severity', 'timestamp'," " 'type', 'user']") msg = msg.format(key=u'alarm_id', value=u'a') self.assertEqual(msg, resp.json['error_message']['faultstring']) def test_get_alarm_history_constrained_by_not_supported_rule(self): query = dict(field='abcd', op='eq', value='abcd') resp = self._get_alarm_history('a', query=query, expect_errors=True, status=400) msg = ('Unknown argument: "abcd": unrecognized' " field in query: [], valid keys: ['project', " "'search_offset', 'severity', 'timestamp'," " 'type', 'user']") msg = msg.format(key=u'abcd', value=u'abcd') self.assertEqual(msg, resp.json['error_message']['faultstring']) def test_get_alarm_history_constrained_by_severity(self): self._update_alarm('a', dict(severity='low')) query = dict(field='severity', op='eq', value='low') history = self._get_alarm_history('a', query=query) self.assertEqual(1, len(history)) self.assertEqual(jsonutils.dumps({'severity': 'low'}), history[0]['detail']) def test_get_nonexistent_alarm_history(self): # the existence of alarm history is independent of the # continued existence of the alarm itself history = self._get_alarm_history('foobar') self.assertEqual([], history) class TestAlarmsHistoryLegacy(LegacyPolicyFileMixin, TestAlarmsHistory): pass class TestAlarmsQuotas(TestAlarmsBase): def _test_alarm_quota(self): alarm = { 'name': 'alarm', 'type': 'threshold', 'user_id': self.auth_headers['X-User-Id'], 'project_id': self.auth_headers['X-Project-Id'], 'threshold_rule': { 'meter_name': 'testmeter', 'query': [], 'comparison_operator': 'le', 'statistic': 'max', 'threshold': 42.0, 'period': 60, 'evaluation_periods': 1, } } resp = self.post_json('/alarms', params=alarm, headers=self.auth_headers) self.assertEqual(201, resp.status_code) alarms = self.get_json('/alarms', headers=self.auth_headers) self.assertEqual(1, len(alarms)) alarm['name'] = 'another_user_alarm' resp = self.post_json('/alarms', params=alarm, expect_errors=True, 
headers=self.auth_headers) self.assertEqual(403, resp.status_code) faultstring = 'Alarm quota exceeded for user' self.assertIn(faultstring, resp.json['error_message']['faultstring']) alarms = self.get_json('/alarms', headers=self.auth_headers) self.assertEqual(1, len(alarms)) def test_alarms_quotas(self): self.CONF.set_override('user_alarm_quota', 1) self.CONF.set_override('project_alarm_quota', 1) self._test_alarm_quota() def test_project_alarms_quotas(self): self.CONF.set_override('project_alarm_quota', 1) self._test_alarm_quota() def test_user_alarms_quotas(self): self.CONF.set_override('user_alarm_quota', 1) self._test_alarm_quota() def test_larger_limit_project_alarms_quotas(self): self.CONF.set_override('user_alarm_quota', 1) self.CONF.set_override('project_alarm_quota', 2) self._test_alarm_quota() def test_larger_limit_user_alarms_quotas(self): self.CONF.set_override('user_alarm_quota', 2) self.CONF.set_override('project_alarm_quota', 1) self._test_alarm_quota() def test_larger_limit_user_alarm_quotas_multitenant_user(self): self.CONF.set_override('user_alarm_quota', 2) self.CONF.set_override('project_alarm_quota', 1) def _test(field, value): query = [{ 'field': field, 'op': 'eq', 'value': value }] alarms = self.get_json('/alarms', q=query, headers=self.auth_headers) self.assertEqual(1, len(alarms)) alarm = { 'name': 'alarm', 'type': 'threshold', 'user_id': self.auth_headers['X-User-Id'], 'project_id': self.auth_headers['X-Project-Id'], 'threshold_rule': { 'meter_name': 'testmeter', 'query': [], 'comparison_operator': 'le', 'statistic': 'max', 'threshold': 42.0, 'period': 60, 'evaluation_periods': 1, } } resp = self.post_json('/alarms', params=alarm, headers=self.auth_headers) self.assertEqual(201, resp.status_code) _test('project_id', self.auth_headers['X-Project-Id']) self.auth_headers['X-Project-Id'] = str(uuid.uuid4()) alarm['name'] = 'another_user_alarm' alarm['project_id'] = self.auth_headers['X-Project-Id'] resp = self.post_json('/alarms', params=alarm, headers=self.auth_headers) self.assertEqual(201, resp.status_code) _test('project_id', self.auth_headers['X-Project-Id']) self.auth_headers["X-roles"] = "admin" alarms = self.get_json('/alarms', headers=self.auth_headers) self.assertEqual(2, len(alarms)) class TestAlarmsRuleThreshold(TestAlarmsBase): def test_post_invalid_alarm_statistic(self): json = { 'name': 'added_alarm', 'type': 'threshold', 'threshold_rule': { 'meter_name': 'ameter', 'comparison_operator': 'gt', 'threshold': 2.0, 'statistic': 'magic', } } resp = self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) expected_err_msg = ("Invalid input for field/attribute" " statistic. Value: 'magic'.") self.assertIn(expected_err_msg, resp.json['error_message']['faultstring']) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(0, len(alarms)) def test_post_invalid_alarm_input_comparison_operator(self): json = { 'name': 'alarm2', 'state': 'ok', 'type': 'threshold', 'threshold_rule': { 'meter_name': 'ameter', 'comparison_operator': 'bad_co', 'threshold': 50.0 } } resp = self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) expected_err_msg = ("Invalid input for field/attribute" " comparison_operator." 
" Value: 'bad_co'.") self.assertIn(expected_err_msg, resp.json['error_message']['faultstring']) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(0, len(alarms)) def test_post_invalid_alarm_query(self): json = { 'name': 'added_alarm', 'type': 'threshold', 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'field': 'metadata.invalid', 'field': 'gt', 'value': 'value'}], 'comparison_operator': 'gt', 'threshold': 2.0, 'statistic': 'avg', } } self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(0, len(alarms)) def test_post_invalid_alarm_query_field_type(self): json = { 'name': 'added_alarm', 'type': 'threshold', 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'field': 'metadata.valid', 'op': 'eq', 'value': 'value', 'type': 'blob'}], 'comparison_operator': 'gt', 'threshold': 2.0, 'statistic': 'avg', } } resp = self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) expected_error_message = 'The data type blob is not supported.' resp_string = jsonutils.loads(resp.body) fault_string = resp_string['error_message']['faultstring'] self.assertTrue(fault_string.startswith(expected_error_message)) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(0, len(alarms)) def test_post_invalid_alarm_query_non_field(self): json = { 'name': 'added_alarm', 'type': 'threshold', 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'q.field': 'metadata.valid', 'value': 'value'}], 'threshold': 2.0, } } resp = self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) expected_error_message = ("Unknown attribute for argument " "data.threshold_rule.query: q.field") fault_string = resp.json['error_message']['faultstring'] self.assertEqual(expected_error_message, fault_string) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(0, len(alarms)) def test_post_invalid_alarm_query_non_value(self): json = { 'name': 'added_alarm', 'type': 'threshold', 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'field': 'metadata.valid', 'q.value': 'value'}], 'threshold': 2.0, } } resp = self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) expected_error_message = ("Unknown attribute for argument " "data.threshold_rule.query: q.value") fault_string = resp.json['error_message']['faultstring'] self.assertEqual(expected_error_message, fault_string) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(0, len(alarms)) def test_post_invalid_alarm_timestamp_in_threshold_rule(self): date_time = datetime.datetime(2012, 7, 2, 10, 41) isotime = date_time.isoformat() json = { 'name': 'invalid_alarm', 'type': 'threshold', 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'field': 'timestamp', 'op': 'gt', 'value': isotime}], 'comparison_operator': 'gt', 'threshold': 2.0, } } resp = self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(0, len(alarms)) self.assertEqual( 'Unknown argument: "timestamp": ' 'not valid for this resource', resp.json['error_message']['faultstring']) def test_post_threshold_rule_defaults(self): to_check = { 'name': 'added_alarm_defaults', 'state': 'insufficient data', 'description': ('Alarm when ameter is eq a avg of ' '300.0 over 60 seconds'), 'type': 'threshold', 'threshold_rule': { 'meter_name': 'ameter', 'query': [{'field': 
'project_id', 'op': 'eq', 'value': self.auth_headers['X-Project-Id']}], 'threshold': 300.0, 'comparison_operator': 'eq', 'statistic': 'avg', 'evaluation_periods': 1, 'period': 60, } } self._add_default_threshold_rule(to_check) json = { 'name': 'added_alarm_defaults', 'type': 'threshold', 'threshold_rule': { 'meter_name': 'ameter', 'threshold': 300.0 } } self.post_json('/alarms', params=json, status=201, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(1, len(alarms)) for alarm in alarms: if alarm.name == 'added_alarm_defaults': for key in to_check: if key.endswith('_rule'): storage_key = 'rule' else: storage_key = key self.assertEqual(to_check[key], getattr(alarm, storage_key)) break else: self.fail("Alarm not found") class TestAlarmsRuleCombination(TestAlarmsBase): def setUp(self): super(TestAlarmsRuleCombination, self).setUp() for alarm in default_alarms(self.auth_headers): self.alarm_conn.update_alarm(alarm) def test_get_alarm_combination(self): alarms = self.get_json('/alarms', headers=self.auth_headers, q=[{'field': 'name', 'value': 'name4', }]) self.assertEqual('name4', alarms[0]['name']) self.assertEqual(['a', 'b'], alarms[0]['combination_rule']['alarm_ids']) self.assertEqual('or', alarms[0]['combination_rule']['operator']) one = self.get_json('/alarms/%s' % alarms[0]['alarm_id'], headers=self.auth_headers) self.assertEqual('name4', one['name']) self.assertEqual(['a', 'b'], alarms[0]['combination_rule']['alarm_ids']) self.assertEqual('or', alarms[0]['combination_rule']['operator']) self.assertEqual(alarms[0]['alarm_id'], one['alarm_id']) self.assertEqual(alarms[0]['repeat_actions'], one['repeat_actions']) def test_post_alarm_combination(self): json = { 'enabled': False, 'name': 'added_alarm', 'state': 'ok', 'type': 'combination', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, 'combination_rule': { 'alarm_ids': ['a', 'b'], 'operator': 'and', } } self.post_json('/alarms', params=json, status=201, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms(enabled=False)) self.assertEqual(1, len(alarms)) if alarms[0].name == 'added_alarm': for key in json: if key.endswith('_rule'): storage_key = 'rule' else: storage_key = key self.assertEqual(json[key], getattr(alarms[0], storage_key)) else: self.fail("Alarm not found") def test_post_invalid_alarm_combination(self): """Test that post a combination alarm with a not existing alarm id.""" json = { 'enabled': False, 'name': 'added_alarm', 'state': 'ok', 'type': 'combination', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, 'combination_rule': { 'alarm_ids': ['not_exists', 'b'], 'operator': 'and', } } self.post_json('/alarms', params=json, status=404, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms(enabled=False)) self.assertEqual(0, len(alarms)) def test_post_invalid_combination_alarm_input_operator(self): json = { 'enabled': False, 'name': 'alarm6', 'state': 'ok', 'type': 'combination', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, 'combination_rule': { 'alarm_ids': ['a', 'b'], 'operator': 'bad_operator', } } resp = self.post_json('/alarms', params=json, expect_errors=True, status=400, headers=self.auth_headers) expected_err_msg = 
("Invalid input for field/attribute" " operator." " Value: 'bad_operator'.") self.assertIn(expected_err_msg, resp.json['error_message']['faultstring']) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(4, len(alarms)) def test_post_combination_alarm_as_user_with_unauthorized_alarm(self): """Test posting a combination alarm. Test that post a combination alarm as normal user/project with an alarm_id unauthorized for this project/user """ json = { 'enabled': False, 'name': 'added_alarm', 'state': 'ok', 'type': 'combination', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, 'combination_rule': { 'alarm_ids': ['a', 'b'], 'operator': 'and', } } an_other_user_auth = {'X-User-Id': str(uuid.uuid4()), 'X-Project-Id': str(uuid.uuid4())} resp = self.post_json('/alarms', params=json, status=404, headers=an_other_user_auth) self.assertEqual("Alarm a not found in project " "%s" % an_other_user_auth['X-Project-Id'], jsonutils.loads(resp.body)['error_message'] ['faultstring']) def test_post_combination_alarm_as_admin_on_behalf_of_an_other_user(self): """Test posting a combination alarm. Test that post a combination alarm as admin on behalf of an other user/project with an alarm_id unauthorized for this project/user """ json = { 'enabled': False, 'name': 'added_alarm', 'state': 'ok', 'user_id': 'auseridthatisnotmine', 'project_id': 'aprojectidthatisnotmine', 'type': 'combination', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, 'combination_rule': { 'alarm_ids': ['a', 'b'], 'operator': 'and', } } headers = {} headers.update(self.auth_headers) headers['X-Roles'] = 'admin' resp = self.post_json('/alarms', params=json, status=404, headers=headers) self.assertEqual("Alarm a not found in project " "aprojectidthatisnotmine", jsonutils.loads(resp.body)['error_message'] ['faultstring']) def test_post_combination_alarm_with_reasonable_description(self): """Test posting a combination alarm. Test that post a combination alarm with two blanks around the operator in alarm description. """ json = { 'enabled': False, 'name': 'added_alarm', 'state': 'ok', 'type': 'combination', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, 'combination_rule': { 'alarm_ids': ['a', 'b'], 'operator': 'and', } } self.post_json('/alarms', params=json, status=201, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms(enabled=False)) self.assertEqual(1, len(alarms)) self.assertEqual(u'Combined state of alarms a and b', alarms[0].description) def _do_post_combination_alarm_as_admin_success(self, owner_is_set): """Test posting a combination alarm. 
Test that post a combination alarm as admin on behalf of nobody with an alarm_id of someone else, with owner set or not """ json = { 'enabled': False, 'name': 'added_alarm', 'state': 'ok', 'type': 'combination', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, 'combination_rule': { 'alarm_ids': ['a', 'b'], 'operator': 'and', } } an_other_admin_auth = {'X-User-Id': str(uuid.uuid4()), 'X-Project-Id': str(uuid.uuid4()), 'X-Roles': 'admin'} if owner_is_set: json['project_id'] = an_other_admin_auth['X-Project-Id'] json['user_id'] = an_other_admin_auth['X-User-Id'] self.post_json('/alarms', params=json, status=201, headers=an_other_admin_auth) alarms = list(self.alarm_conn.get_alarms(enabled=False)) if alarms[0].name == 'added_alarm': for key in json: if key.endswith('_rule'): storage_key = 'rule' else: storage_key = key self.assertEqual(json[key], getattr(alarms[0], storage_key)) else: self.fail("Alarm not found") def test_post_combination_alarm_as_admin_success_owner_unset(self): self._do_post_combination_alarm_as_admin_success(False) def test_post_combination_alarm_as_admin_success_owner_set(self): self._do_post_combination_alarm_as_admin_success(True) def test_post_alarm_combination_duplicate_alarm_ids(self): """Test combination alarm doesn't allow duplicate alarm ids.""" json_body = { 'name': 'dup_alarm_id', 'type': 'combination', 'combination_rule': { 'alarm_ids': ['a', 'a', 'd', 'a', 'c', 'c', 'b'], } } self.post_json('/alarms', params=json_body, status=201, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms(name='dup_alarm_id')) self.assertEqual(1, len(alarms)) self.assertEqual(['a', 'd', 'c', 'b'], alarms[0].rule.get('alarm_ids')) def _test_post_alarm_combination_rule_less_than_two_alarms(self, alarm_ids=None): json_body = { 'name': 'one_alarm_in_combination_rule', 'type': 'combination', 'combination_rule': { 'alarm_ids': alarm_ids or [] } } resp = self.post_json('/alarms', params=json_body, expect_errors=True, status=400, headers=self.auth_headers) self.assertEqual( 'Alarm combination rule should contain at' ' least two different alarm ids.', resp.json['error_message']['faultstring']) def test_post_alarm_combination_rule_with_no_alarm(self): self._test_post_alarm_combination_rule_less_than_two_alarms() def test_post_alarm_combination_rule_with_one_alarm(self): self._test_post_alarm_combination_rule_less_than_two_alarms(['a']) def test_post_alarm_combination_rule_with_two_same_alarms(self): self._test_post_alarm_combination_rule_less_than_two_alarms(['a', 'a']) def test_put_alarm_combination_cannot_specify_itself(self): json = { 'name': 'name4', 'type': 'combination', 'combination_rule': { 'alarm_ids': ['d', 'a'], } } data = self.get_json('/alarms', headers=self.auth_headers, q=[{'field': 'name', 'value': 'name4', }]) self.assertEqual(1, len(data)) alarm_id = data[0]['alarm_id'] resp = self.put_json('/alarms/%s' % alarm_id, expect_errors=True, status=400, params=json, headers=self.auth_headers) msg = 'Cannot specify alarm %s itself in combination rule' % alarm_id self.assertEqual(msg, resp.json['error_message']['faultstring']) def _test_put_alarm_combination_rule_less_than_two_alarms(self, alarm_ids=None): json_body = { 'name': 'name4', 'type': 'combination', 'combination_rule': { 'alarm_ids': alarm_ids or [] } } data = self.get_json('/alarms', headers=self.auth_headers, q=[{'field': 'name', 'value': 'name4', }]) self.assertEqual(1, len(data)) alarm_id = 
data[0]['alarm_id'] resp = self.put_json('/alarms/%s' % alarm_id, params=json_body, expect_errors=True, status=400, headers=self.auth_headers) self.assertEqual( 'Alarm combination rule should contain at' ' least two different alarm ids.', resp.json['error_message']['faultstring']) def test_put_alarm_combination_rule_with_no_alarm(self): self._test_put_alarm_combination_rule_less_than_two_alarms() def test_put_alarm_combination_rule_with_one_alarm(self): self._test_put_alarm_combination_rule_less_than_two_alarms(['a']) def test_put_alarm_combination_rule_with_two_same_alarm_itself(self): self._test_put_alarm_combination_rule_less_than_two_alarms(['d', 'd']) def test_put_combination_alarm_with_duplicate_ids(self): """Test combination alarm doesn't allow duplicate alarm ids.""" alarms = self.get_json('/alarms', headers=self.auth_headers, q=[{'field': 'name', 'value': 'name4', }]) self.assertEqual(1, len(alarms)) alarm_id = alarms[0]['alarm_id'] json_body = { 'name': 'name4', 'type': 'combination', 'combination_rule': { 'alarm_ids': ['c', 'a', 'b', 'a', 'c', 'b'], } } self.put_json('/alarms/%s' % alarm_id, params=json_body, status=200, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms(alarm_id=alarm_id)) self.assertEqual(1, len(alarms)) self.assertEqual(['c', 'a', 'b'], alarms[0].rule.get('alarm_ids')) class TestAlarmsRuleCombinationLegacy(LegacyPolicyFileMixin, TestAlarmsRuleCombination): pass class TestAlarmsRuleGnocchi(TestAlarmsBase): def setUp(self): super(TestAlarmsRuleGnocchi, self).setUp() for alarm in [ models.Alarm(name='name1', type='gnocchi_resources_threshold', enabled=True, alarm_id='e', description='e', state='insufficient data', severity='critical', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=True, user_id=self.auth_headers['X-User-Id'], project_id=self.auth_headers['X-Project-Id'], time_constraints=[], rule=dict(comparison_operator='gt', threshold=2.0, aggregation_method='mean', granularity=60, evaluation_periods=1, metric='meter.test', resource_type='instance', resource_id=( '6841c175-d7c4-4bc2-bc7a-1c7832271b8f'), ) ), models.Alarm(name='name2', type='gnocchi_aggregation_by_metrics_threshold', enabled=True, alarm_id='f', description='f', state='insufficient data', severity='critical', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=True, user_id=self.auth_headers['X-User-Id'], project_id=self.auth_headers['X-Project-Id'], time_constraints=[], rule=dict(comparison_operator='gt', threshold=2.0, aggregation_method='mean', evaluation_periods=1, granularity=60, metrics=[ '41869681-5776-46d6-91ed-cccc43b6e4e3', 'a1fb80f4-c242-4f57-87c6-68f47521059e'] ), ), models.Alarm(name='name3', type='gnocchi_aggregation_by_resources_threshold', enabled=True, alarm_id='g', description='f', state='insufficient data', severity='critical', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=True, user_id=self.auth_headers['X-User-Id'], project_id=self.auth_headers['X-Project-Id'], time_constraints=[], rule=dict(comparison_operator='gt', threshold=2.0, aggregation_method='mean', granularity=60, evaluation_periods=1, metric='meter.test', resource_type='instance', query='{"=": {"server_group": ' '"my_autoscaling_group"}}') ), ]: self.alarm_conn.update_alarm(alarm) def 
test_list_alarms(self): data = self.get_json('/alarms', headers=self.auth_headers) self.assertEqual(3, len(data)) self.assertEqual(set(['name1', 'name2', 'name3']), set(r['name'] for r in data)) self.assertEqual(set(['meter.test']), set(r['gnocchi_resources_threshold_rule']['metric'] for r in data if 'gnocchi_resources_threshold_rule' in r)) def test_post_gnocchi_resources_alarm(self): json = { 'enabled': False, 'name': 'name_post', 'state': 'ok', 'type': 'gnocchi_resources_threshold', 'severity': 'critical', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, 'gnocchi_resources_threshold_rule': { 'metric': 'ameter', 'comparison_operator': 'le', 'aggregation_method': 'count', 'threshold': 50, 'evaluation_periods': 3, 'granularity': 180, 'resource_type': 'instance', 'resource_id': '209ef69c-c10c-4efb-90ff-46f4b2d90d2e', } } with mock.patch('aodh.api.controllers.v2.alarm_rules.' 'gnocchi.client') as clientlib: c = clientlib.Client.return_value c.capabilities.list.side_effect = Exception("boom!") resp = self.post_json('/alarms', params=json, headers=self.auth_headers, expect_errors=True) self.assertEqual(503, resp.status_code, resp.body) with mock.patch('aodh.api.controllers.v2.alarm_rules.' 'gnocchi.client') as clientlib: c = clientlib.Client.return_value c.capabilities.list.side_effect = ( exceptions.ClientException(500, "my_custom_error")) resp = self.post_json('/alarms', params=json, headers=self.auth_headers, expect_errors=True) self.assertEqual(500, resp.status_code, resp.body) self.assertIn('my_custom_error', resp.json['error_message']['faultstring']) with mock.patch('aodh.api.controllers.v2.alarm_rules.' 'gnocchi.client') as clientlib: c = clientlib.Client.return_value c.capabilities.list.return_value = { 'aggregation_methods': ['count']} self.post_json('/alarms', params=json, headers=self.auth_headers) expected = [mock.call.capabilities.list(), mock.call.resource.get( "instance", "209ef69c-c10c-4efb-90ff-46f4b2d90d2e")] self.assertEqual(expected, c.mock_calls) alarms = list(self.alarm_conn.get_alarms(enabled=False)) self.assertEqual(1, len(alarms)) self._verify_alarm(json, alarms[0]) def test_post_gnocchi_metrics_alarm(self): json = { 'enabled': False, 'name': 'name_post', 'state': 'ok', 'type': 'gnocchi_aggregation_by_metrics_threshold', 'severity': 'critical', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, 'gnocchi_aggregation_by_metrics_threshold_rule': { 'metrics': ['b3d9d8ab-05e8-439f-89ad-5e978dd2a5eb', '009d4faf-c275-46f0-8f2d-670b15bac2b0'], 'comparison_operator': 'le', 'aggregation_method': 'count', 'threshold': 50, 'evaluation_periods': 3, 'granularity': 180, } } with mock.patch('aodh.api.controllers.v2.alarm_rules.' 
'gnocchi.client') as clientlib: c = clientlib.Client.return_value c.capabilities.list.return_value = { 'aggregation_methods': ['count']} self.post_json('/alarms', params=json, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms(enabled=False)) self.assertEqual(1, len(alarms)) self._verify_alarm(json, alarms[0]) def test_post_gnocchi_aggregation_alarm_project_constraint(self): json = { 'enabled': False, 'name': 'project_constraint', 'state': 'ok', 'type': 'gnocchi_aggregation_by_resources_threshold', 'severity': 'critical', 'ok_actions': ['http://something/ok'], 'alarm_actions': ['http://something/alarm'], 'insufficient_data_actions': ['http://something/no'], 'repeat_actions': True, 'gnocchi_aggregation_by_resources_threshold_rule': { 'metric': 'ameter', 'comparison_operator': 'le', 'aggregation_method': 'count', 'threshold': 50, 'evaluation_periods': 3, 'granularity': 180, 'resource_type': 'instance', 'query': '{"=": {"server_group": "my_autoscaling_group"}}', } } expected_query = {"and": [{"=": {"created_by_project_id": self.auth_headers['X-Project-Id']}}, {"=": {"server_group": "my_autoscaling_group"}}]} with mock.patch('aodh.api.controllers.v2.alarm_rules.' 'gnocchi.client') as clientlib: c = clientlib.Client.return_value c.capabilities.list.return_value = { 'aggregation_methods': ['count']} self.post_json('/alarms', params=json, headers=self.auth_headers) self.assertEqual([mock.call( aggregation='count', metrics='ameter', needed_overlap=0, query=expected_query, resource_type="instance")], c.metric.aggregation.mock_calls), alarms = list(self.alarm_conn.get_alarms(enabled=False)) self.assertEqual(1, len(alarms)) json['gnocchi_aggregation_by_resources_threshold_rule']['query'] = ( jsonutils.dumps(expected_query)) self._verify_alarm(json, alarms[0]) class TestAlarmsRuleGnocchiLegacy(LegacyPolicyFileMixin, TestAlarmsRuleGnocchi): pass class TestAlarmsEvent(TestAlarmsBase): def test_list_alarms(self): alarm = models.Alarm(name='event.alarm.1', type='event', enabled=True, alarm_id='h', description='h', state='insufficient data', severity='moderate', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=False, user_id=self.auth_headers['X-User-Id'], project_id=self.auth_headers['X-Project-Id'], time_constraints=[], rule=dict(event_type='event.test', query=[]), ) self.alarm_conn.update_alarm(alarm) data = self.get_json('/alarms', headers=self.auth_headers) self.assertEqual(1, len(data)) self.assertEqual(set(['event.alarm.1']), set(r['name'] for r in data)) self.assertEqual(set(['event.test']), set(r['event_rule']['event_type'] for r in data if 'event_rule' in r)) def test_post_event_alarm_defaults(self): to_check = { 'enabled': True, 'name': 'added_alarm_defaults', 'state': 'insufficient data', 'description': 'Alarm when * event occurred.', 'type': 'event', 'ok_actions': [], 'alarm_actions': [], 'insufficient_data_actions': [], 'repeat_actions': False, 'rule': { 'event_type': '*', 'query': [], } } json = { 'name': 'added_alarm_defaults', 'type': 'event', 'event_rule': { 'event_type': '*', 'query': [] } } self.post_json('/alarms', params=json, status=201, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(1, len(alarms)) for alarm in alarms: if alarm.name == 'added_alarm_defaults': for key in to_check: self.assertEqual(to_check[key], getattr(alarm, key)) break else: self.fail("Alarm not found") class TestAlarmsCompositeRule(TestAlarmsBase): def 
setUp(self): super(TestAlarmsCompositeRule, self).setUp() self.sub_rule1 = { "type": "threshold", "meter_name": "cpu_util", "evaluation_periods": 5, "threshold": 0.8, "query": [{ "field": "metadata.metering.stack_id", "value": "36b20eb3-d749-4964-a7d2-a71147cd8147", "op": "eq" }], "statistic": "avg", "period": 60, "exclude_outliers": False, "comparison_operator": "gt" } self.sub_rule2 = { "type": "threshold", "meter_name": "disk.iops", "evaluation_periods": 4, "threshold": 200, "query": [{ "field": "metadata.metering.stack_id", "value": "36b20eb3-d749-4964-a7d2-a71147cd8147", "op": "eq" }], "statistic": "max", "period": 60, "exclude_outliers": False, "comparison_operator": "gt" } self.sub_rule3 = { "type": "threshold", "meter_name": "network.incoming.packets.rate", "evaluation_periods": 3, "threshold": 1000, "query": [{ "field": "metadata.metering.stack_id", "value": "36b20eb3-d749-4964-a7d2-a71147cd8147", "op": "eq" }], "statistic": "avg", "period": 60, "exclude_outliers": False, "comparison_operator": "gt" } self.rule = { "or": [self.sub_rule1, { "and": [self.sub_rule2, self.sub_rule3] }]} def test_list_alarms(self): alarm = models.Alarm(name='composite_alarm', type='composite', enabled=True, alarm_id='composite', description='composite', state='insufficient data', severity='moderate', state_timestamp=constants.MIN_DATETIME, timestamp=constants.MIN_DATETIME, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=False, user_id=self.auth_headers['X-User-Id'], project_id=self.auth_headers['X-Project-Id'], time_constraints=[], rule=self.rule, ) self.alarm_conn.update_alarm(alarm) data = self.get_json('/alarms', headers=self.auth_headers) self.assertEqual(1, len(data)) self.assertEqual(set(['composite_alarm']), set(r['name'] for r in data)) self.assertEqual(self.rule, data[0]['composite_rule']) def test_post_with_composite_rule(self): json = { "type": "composite", "name": "composite_alarm", "composite_rule": self.rule, "repeat_actions": False } self.post_json('/alarms', params=json, status=201, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(1, len(alarms)) self.assertEqual(self.rule, alarms[0].rule) def test_post_with_sub_rule_with_wrong_type(self): self.sub_rule1['type'] = 'non-type' json = { "type": "composite", "name": "composite_alarm", "composite_rule": self.rule, "repeat_actions": False } response = self.post_json('/alarms', params=json, status=400, expect_errors=True, headers=self.auth_headers) err = ("Unsupported sub-rule type :non-type in composite " "rule, should be one of: " "['gnocchi_aggregation_by_metrics_threshold', " "'gnocchi_aggregation_by_resources_threshold', " "'gnocchi_resources_threshold', 'threshold']") faultstring = response.json['error_message']['faultstring'] self.assertEqual(err, faultstring) def test_post_with_sub_rule_with_only_required_params(self): sub_rulea = { "meter_name": "cpu_util", "threshold": 0.8, "type": "threshold"} sub_ruleb = { "meter_name": "disk.iops", "threshold": 200, "type": "threshold"} json = { "type": "composite", "name": "composite_alarm", "composite_rule": {"and": [sub_rulea, sub_ruleb]}, "repeat_actions": False } self.post_json('/alarms', params=json, status=201, headers=self.auth_headers) alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(1, len(alarms)) def test_post_with_sub_rule_with_invalid_params(self): self.sub_rule1['threshold'] = False json = { "type": "composite", "name": "composite_alarm", "composite_rule": self.rule, "repeat_actions": False } response = 
self.post_json('/alarms', params=json, status=400, expect_errors=True, headers=self.auth_headers) faultstring = ("Invalid input for field/attribute threshold. " "Value: 'False'. Wrong type. Expected '', got ''") self.assertEqual(faultstring, response.json['error_message']['faultstring']) aodh-2.0.6/aodh/tests/functional/api/v2/test_wsme_custom_type.py0000664000567000056710000000213613076064371026142 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslotest import base import wsme from aodh.api.controllers.v2 import base as v2_base class TestWsmeCustomType(base.BaseTestCase): def test_advenum_default(self): class dummybase(wsme.types.Base): ae = v2_base.AdvEnum("name", str, "one", "other", default="other") obj = dummybase() self.assertEqual("other", obj.ae) obj = dummybase(ae="one") self.assertEqual("one", obj.ae) self.assertRaises(wsme.exc.InvalidInput, dummybase, ae="not exists") aodh-2.0.6/aodh/tests/functional/api/v2/policy.json-test0000664000567000056710000000036113076064371024270 0ustar jenkinsjenkins00000000000000{ "context_is_admin": "role:admin", "segregation": "rule:context_is_admin", "admin_or_owner": "rule:context_is_admin or project_id:%(project_id)s", "default": "rule:admin_or_owner", "telemetry:get_alarms": "role:admin" } aodh-2.0.6/aodh/tests/functional/api/v2/test_app.py0000664000567000056710000002011013076064371023304 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM Corp. # Copyright 2013 Julien Danjou # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Test basic aodh-api app """ import json import mock import six import wsme from aodh import i18n from aodh.tests.functional.api import v2 class TestApiMiddleware(v2.FunctionalTest): no_lang_translated_error = 'No lang translated error' en_US_translated_error = 'en-US translated error' def _fake_translate(self, message, user_locale): if user_locale is None: return self.no_lang_translated_error else: return self.en_US_translated_error def test_json_parsable_error_middleware_404(self): response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "application/json"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "application/json,application/xml"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "application/xml;q=0.8, \ application/json"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) response = self.get_json('/invalid_path', expect_errors=True ) self.assertEqual(404, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "text/html,*/*"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) def test_json_parsable_error_middleware_translation_400(self): # Ensure translated messages get placed properly into json faults with mock.patch.object(i18n, 'translate', side_effect=self._fake_translate): response = self.post_json('/alarms', params={'name': 'foobar', 'type': 'threshold'}, expect_errors=True, headers={"Accept": "application/json"} ) self.assertEqual(400, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) self.assertEqual(self.no_lang_translated_error, response.json['error_message']['faultstring']) def test_xml_parsable_error_middleware_404(self): response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "application/xml,*/*"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/xml", response.content_type) self.assertEqual('error_message', response.xml.tag) response = self.get_json('/invalid_path', expect_errors=True, headers={"Accept": "application/json;q=0.8 \ ,application/xml"} ) self.assertEqual(404, response.status_int) self.assertEqual("application/xml", response.content_type) self.assertEqual('error_message', response.xml.tag) def test_xml_parsable_error_middleware_translation_400(self): # Ensure translated messages get placed properly into xml faults with mock.patch.object(i18n, 'translate', side_effect=self._fake_translate): response = self.post_json('/alarms', params={'name': 'foobar', 'type': 'threshold'}, expect_errors=True, headers={"Accept": "application/xml,*/*"} ) self.assertEqual(400, response.status_int) self.assertEqual("application/xml", response.content_type) self.assertEqual('error_message', response.xml.tag) fault = response.xml.findall('./error/faultstring') for fault_string in fault: 
self.assertEqual(self.no_lang_translated_error, fault_string.text) def test_best_match_language(self): # Ensure that we are actually invoking language negotiation with mock.patch.object(i18n, 'translate', side_effect=self._fake_translate): response = self.post_json('/alarms', params={'name': 'foobar', 'type': 'threshold'}, expect_errors=True, headers={"Accept": "application/xml,*/*", "Accept-Language": "en-US"} ) self.assertEqual(400, response.status_int) self.assertEqual("application/xml", response.content_type) self.assertEqual('error_message', response.xml.tag) fault = response.xml.findall('./error/faultstring') for fault_string in fault: self.assertEqual(self.en_US_translated_error, fault_string.text) def test_translated_then_untranslated_error(self): resp = self.get_json('/alarms/alarm-id-3', expect_errors=True) self.assertEqual(404, resp.status_code) body = resp.body if six.PY3: body = body.decode('utf-8') self.assertEqual("Alarm alarm-id-3 not found", json.loads(body)['error_message'] ['faultstring']) with mock.patch('aodh.api.controllers.' 'v2.base.AlarmNotFound') as CustomErrorClass: CustomErrorClass.return_value = wsme.exc.ClientSideError( "untranslated_error", status_code=404) resp = self.get_json('/alarms/alarm-id-5', expect_errors=True) self.assertEqual(404, resp.status_code) body = resp.body if six.PY3: body = body.decode('utf-8') self.assertEqual("untranslated_error", json.loads(body)['error_message'] ['faultstring']) aodh-2.0.6/aodh/tests/functional/api/v2/test_capabilities.py0000664000567000056710000000225613076064372025171 0ustar jenkinsjenkins00000000000000# # Copyright Ericsson AB 2014. All rights reserved # # Authors: Ildiko Vancsa # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from aodh.tests.functional.api import v2 as tests_api class TestCapabilitiesController(tests_api.FunctionalTest): def setUp(self): super(TestCapabilitiesController, self).setUp() self.url = '/capabilities' def test_capabilities(self): data = self.get_json(self.url) # check that capabilities data contains both 'api' and 'storage' fields self.assertIsNotNone(data) self.assertNotEqual({}, data) self.assertIn('api', data) self.assertIn('alarm_storage', data) aodh-2.0.6/aodh/tests/functional/api/v2/test_acl_scenarios.py0000664000567000056710000000353713076064372025350 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Test ACL.""" import mock import webtest from aodh.api import app from aodh.tests.functional.api import v2 class TestAPIACL(v2.FunctionalTest): def _make_app(self): file_name = self.path_get('etc/aodh/api_paste.ini') self.CONF.set_override("paste_config", file_name, "api") # We need the other call to prepare_service in app.py to return the # same tweaked conf object. with mock.patch('aodh.service.prepare_service') as ps: ps.return_value = self.CONF return webtest.TestApp(app.load_app(conf=self.CONF)) def test_non_authenticated(self): response = self.get_json('/meters', expect_errors=True) self.assertEqual(401, response.status_int) def test_authenticated_wrong_role(self): response = self.get_json('/meters', expect_errors=True, headers={ "X-Roles": "Member", "X-Tenant-Name": "admin", "X-Project-Id": "bc23a9d531064583ace8f67dad60f6bb", }) self.assertEqual(401, response.status_int) aodh-2.0.6/aodh/tests/functional/api/v2/__init__.py0000664000567000056710000000130413076064371023230 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from aodh.tests.functional import api class FunctionalTest(api.FunctionalTest): PATH_PREFIX = '/v2' aodh-2.0.6/aodh/tests/functional/api/v2/test_complex_query_scenarios.py0000664000567000056710000003377313076064372027512 0ustar jenkinsjenkins00000000000000# # Copyright Ericsson AB 2013. All rights reserved # # Authors: Ildiko Vancsa # Balazs Gibizer # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests complex queries for samples """ import datetime from oslo_utils import timeutils from aodh.storage import models from aodh.tests.functional.api import v2 as tests_api admin_header = {"X-Roles": "admin", "X-Project-Id": "project-id1"} non_admin_header = {"X-Roles": "Member", "X-Project-Id": "project-id1"} class TestQueryAlarmsController(tests_api.FunctionalTest): def setUp(self): super(TestQueryAlarmsController, self).setUp() self.alarm_url = '/query/alarms' for state in ['ok', 'alarm', 'insufficient data']: for date in [datetime.datetime(2013, 1, 1), datetime.datetime(2013, 2, 2)]: for id in [1, 2]: alarm_id = "-".join([state, date.isoformat(), str(id)]) project_id = "project-id%d" % id alarm = models.Alarm(name=alarm_id, type='threshold', enabled=True, alarm_id=alarm_id, description='a', state=state, state_timestamp=date, timestamp=date, ok_actions=[], insufficient_data_actions=[], alarm_actions=[], repeat_actions=True, user_id="user-id%d" % id, project_id=project_id, time_constraints=[], rule=dict(comparison_operator='gt', threshold=2.0, statistic='avg', evaluation_periods=60, period=1, meter_name='meter.test', query=[{'field': 'project_id', 'op': 'eq', 'value': project_id}]), severity='critical') self.alarm_conn.update_alarm(alarm) def test_query_all(self): data = self.post_json(self.alarm_url, headers=admin_header, params={}) self.assertEqual(12, len(data.json)) def test_filter_with_isotime_timestamp(self): date_time = datetime.datetime(2013, 1, 1) isotime = date_time.isoformat() data = self.post_json(self.alarm_url, headers=admin_header, params={"filter": '{">": {"timestamp": "' + isotime + '"}}'}) self.assertEqual(6, len(data.json)) for alarm in data.json: result_time = timeutils.parse_isotime(alarm['timestamp']) result_time = result_time.replace(tzinfo=None) self.assertTrue(result_time > date_time) def test_filter_with_isotime_state_timestamp(self): date_time = datetime.datetime(2013, 1, 1) isotime = date_time.isoformat() data = self.post_json(self.alarm_url, headers=admin_header, params={"filter": '{">": {"state_timestamp": "' + isotime + '"}}'}) self.assertEqual(6, len(data.json)) for alarm in data.json: result_time = timeutils.parse_isotime(alarm['state_timestamp']) result_time = result_time.replace(tzinfo=None) self.assertTrue(result_time > date_time) def test_non_admin_tenant_sees_only_its_own_project(self): data = self.post_json(self.alarm_url, params={}, headers=non_admin_header) for alarm in data.json: self.assertEqual("project-id1", alarm['project_id']) def test_non_admin_tenant_cannot_query_others_project(self): data = self.post_json(self.alarm_url, params={"filter": '{"=": {"project_id": "project-id2"}}'}, expect_errors=True, headers=non_admin_header) self.assertEqual(401, data.status_int) self.assertIn(b"Not Authorized to access project project-id2", data.body) def test_non_admin_tenant_can_explicitly_filter_for_own_project(self): data = self.post_json(self.alarm_url, params={"filter": '{"=": {"project_id": "project-id1"}}'}, headers=non_admin_header) for alarm in data.json: self.assertEqual("project-id1", alarm['project_id']) def test_admin_tenant_sees_every_project(self): data = self.post_json(self.alarm_url, params={}, headers=admin_header) self.assertEqual(12, len(data.json)) for alarm in data.json: self.assertIn(alarm['project_id'], (["project-id1", "project-id2"])) def test_admin_tenant_can_query_any_project(self): data = self.post_json(self.alarm_url, params={"filter": '{"=": {"project_id": "project-id2"}}'}, headers=admin_header) self.assertEqual(6, 
len(data.json)) for alarm in data.json: self.assertIn(alarm['project_id'], set(["project-id2"])) def test_query_with_field_project(self): data = self.post_json(self.alarm_url, headers=admin_header, params={"filter": '{"=": {"project": "project-id2"}}'}) self.assertEqual(6, len(data.json)) for sample_item in data.json: self.assertIn(sample_item['project_id'], set(["project-id2"])) def test_query_with_field_user_in_orderby(self): data = self.post_json(self.alarm_url, headers=admin_header, params={"filter": '{"=": {"state": "alarm"}}', "orderby": '[{"user": "DESC"}]'}) self.assertEqual(4, len(data.json)) self.assertEqual(["user-id2", "user-id2", "user-id1", "user-id1"], [s["user_id"] for s in data.json]) def test_query_with_filter_orderby_and_limit(self): orderby = '[{"state_timestamp": "DESC"}]' data = self.post_json(self.alarm_url, headers=admin_header, params={"filter": '{"=": {"state": "alarm"}}', "orderby": orderby, "limit": 3}) self.assertEqual(3, len(data.json)) self.assertEqual(["2013-02-02T00:00:00", "2013-02-02T00:00:00", "2013-01-01T00:00:00"], [a["state_timestamp"] for a in data.json]) for alarm in data.json: self.assertEqual("alarm", alarm["state"]) def test_limit_should_be_positive(self): data = self.post_json(self.alarm_url, headers=admin_header, params={"limit": 0}, expect_errors=True) self.assertEqual(400, data.status_int) self.assertIn(b"Limit should be positive", data.body) class TestQueryAlarmsHistoryController(tests_api.FunctionalTest): def setUp(self): super(TestQueryAlarmsHistoryController, self).setUp() self.url = '/query/alarms/history' for id in [1, 2]: for type in ["creation", "state transition"]: for date in [datetime.datetime(2013, 1, 1), datetime.datetime(2013, 2, 2)]: event_id = "-".join([str(id), type, date.isoformat()]) alarm_change = {"event_id": event_id, "alarm_id": "alarm-id%d" % id, "type": type, "detail": "", "user_id": "user-id%d" % id, "project_id": "project-id%d" % id, "on_behalf_of": "project-id%d" % id, "timestamp": date} self.alarm_conn.record_alarm_change(alarm_change) def test_query_all(self): data = self.post_json(self.url, headers=admin_header, params={}) self.assertEqual(8, len(data.json)) def test_filter_with_isotime(self): date_time = datetime.datetime(2013, 1, 1) isotime = date_time.isoformat() data = self.post_json(self.url, headers=admin_header, params={"filter": '{">": {"timestamp":"' + isotime + '"}}'}) self.assertEqual(4, len(data.json)) for history in data.json: result_time = timeutils.parse_isotime(history['timestamp']) result_time = result_time.replace(tzinfo=None) self.assertTrue(result_time > date_time) def test_non_admin_tenant_sees_only_its_own_project(self): data = self.post_json(self.url, params={}, headers=non_admin_header) for history in data.json: self.assertEqual("project-id1", history['on_behalf_of']) def test_non_admin_tenant_cannot_query_others_project(self): data = self.post_json(self.url, params={"filter": '{"=": {"on_behalf_of":' + ' "project-id2"}}'}, expect_errors=True, headers=non_admin_header) self.assertEqual(401, data.status_int) self.assertIn(b"Not Authorized to access project project-id2", data.body) def test_non_admin_tenant_can_explicitly_filter_for_own_project(self): data = self.post_json(self.url, params={"filter": '{"=": {"on_behalf_of":' + ' "project-id1"}}'}, headers=non_admin_header) for history in data.json: self.assertEqual("project-id1", history['on_behalf_of']) def test_admin_tenant_sees_every_project(self): data = self.post_json(self.url, params={}, headers=admin_header) self.assertEqual(8, 
len(data.json)) for history in data.json: self.assertIn(history['on_behalf_of'], (["project-id1", "project-id2"])) def test_query_with_filter_for_project_orderby_with_user(self): data = self.post_json(self.url, headers=admin_header, params={"filter": '{"=": {"project": "project-id1"}}', "orderby": '[{"user": "DESC"}]', "limit": 3}) self.assertEqual(3, len(data.json)) self.assertEqual(["user-id1", "user-id1", "user-id1"], [h["user_id"] for h in data.json]) for history in data.json: self.assertEqual("project-id1", history['project_id']) def test_query_with_filter_orderby_and_limit(self): data = self.post_json(self.url, headers=admin_header, params={"filter": '{"=": {"type": "creation"}}', "orderby": '[{"timestamp": "DESC"}]', "limit": 3}) self.assertEqual(3, len(data.json)) self.assertEqual(["2013-02-02T00:00:00", "2013-02-02T00:00:00", "2013-01-01T00:00:00"], [h["timestamp"] for h in data.json]) for history in data.json: self.assertEqual("creation", history['type']) def test_limit_should_be_positive(self): data = self.post_json(self.url, params={"limit": 0}, headers=admin_header, expect_errors=True) self.assertEqual(400, data.status_int) self.assertIn(b"Limit should be positive", data.body) aodh-2.0.6/aodh/tests/functional/api/v2/test_complex_query.py0000664000567000056710000002346113076064372025435 0ustar jenkinsjenkins00000000000000# # Copyright Ericsson AB 2013. All rights reserved # # Authors: Ildiko Vancsa # Balazs Gibizer # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Test the methods related to complex query.""" import datetime import fixtures import jsonschema import mock from oslotest import base import wsme from aodh.api.controllers.v2 import query from aodh.storage import models as alarm_models class FakeComplexQuery(query.ValidatedComplexQuery): def __init__(self, db_model, additional_name_mapping=None, metadata=False): super(FakeComplexQuery, self).__init__(query=None, db_model=db_model, additional_name_mapping=( additional_name_mapping or {}), metadata_allowed=metadata) sample_name_mapping = {"resource": "resource_id", "meter": "counter_name", "type": "counter_type", "unit": "counter_unit", "volume": "counter_volume"} class TestComplexQuery(base.BaseTestCase): def setUp(self): super(TestComplexQuery, self).setUp() self.useFixture(fixtures.MonkeyPatch( 'pecan.response', mock.MagicMock())) self.query = FakeComplexQuery(alarm_models.Alarm) self.query_alarmchange = FakeComplexQuery( alarm_models.AlarmChange) def test_replace_isotime_utc(self): filter_expr = {"=": {"timestamp": "2013-12-05T19:38:29Z"}} self.query._replace_isotime_with_datetime(filter_expr) self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), filter_expr["="]["timestamp"]) def test_replace_isotime_timezone_removed(self): filter_expr = {"=": {"timestamp": "2013-12-05T20:38:29+01:00"}} self.query._replace_isotime_with_datetime(filter_expr) self.assertEqual(datetime.datetime(2013, 12, 5, 20, 38, 29), filter_expr["="]["timestamp"]) def test_replace_isotime_wrong_syntax(self): filter_expr = {"=": {"timestamp": "not a valid isotime string"}} self.assertRaises(wsme.exc.ClientSideError, self.query._replace_isotime_with_datetime, filter_expr) def test_replace_isotime_in_complex_filter(self): filter_expr = {"and": [{"=": {"timestamp": "2013-12-05T19:38:29Z"}}, {"=": {"timestamp": "2013-12-06T19:38:29Z"}}]} self.query._replace_isotime_with_datetime(filter_expr) self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), filter_expr["and"][0]["="]["timestamp"]) self.assertEqual(datetime.datetime(2013, 12, 6, 19, 38, 29), filter_expr["and"][1]["="]["timestamp"]) def test_replace_isotime_in_complex_filter_with_unbalanced_tree(self): subfilter = {"and": [{"=": {"project_id": 42}}, {"=": {"timestamp": "2013-12-06T19:38:29Z"}}]} filter_expr = {"or": [{"=": {"timestamp": "2013-12-05T19:38:29Z"}}, subfilter]} self.query._replace_isotime_with_datetime(filter_expr) self.assertEqual(datetime.datetime(2013, 12, 5, 19, 38, 29), filter_expr["or"][0]["="]["timestamp"]) self.assertEqual(datetime.datetime(2013, 12, 6, 19, 38, 29), filter_expr["or"][1]["and"][1]["="]["timestamp"]) def test_convert_operator_to_lower_case(self): filter_expr = {"AND": [{"=": {"project_id": 42}}, {"=": {"project_id": 44}}]} self.query._convert_operator_to_lower_case(filter_expr) self.assertEqual("and", list(filter_expr.keys())[0]) filter_expr = {"Or": [{"=": {"project_id": 43}}, {"anD": [{"=": {"project_id": 44}}, {"=": {"project_id": 42}}]}]} self.query._convert_operator_to_lower_case(filter_expr) self.assertEqual("or", list(filter_expr.keys())[0]) self.assertEqual("and", list(filter_expr["or"][1].keys())[0]) def test_invalid_filter_misstyped_field_name_samples(self): filter = {"=": {"project_id11": 42}} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_invalid_filter_misstyped_field_name_alarms(self): filter = {"=": {"enabbled": True}} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def 
test_invalid_filter_misstyped_field_name_alarmchange(self): filter = {"=": {"tpe": "rule change"}} self.assertRaises(jsonschema.ValidationError, self.query_alarmchange._validate_filter, filter) def test_invalid_complex_filter_wrong_field_names(self): filter = {"and": [{"=": {"non_existing_field": 42}}, {"=": {"project_id": 42}}]} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) filter = {"and": [{"=": {"project_id": 42}}, {"=": {"non_existing_field": 42}}]} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) filter = {"and": [{"=": {"project_id11": 42}}, {"=": {"project_id": 42}}]} self.assertRaises(jsonschema.ValidationError, self.query_alarmchange._validate_filter, filter) filter = {"or": [{"=": {"non_existing_field": 42}}, {"and": [{"=": {"project_id": 44}}, {"=": {"project_id": 42}}]}]} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) filter = {"or": [{"=": {"project_id": 43}}, {"and": [{"=": {"project_id": 44}}, {"=": {"non_existing_field": 42}}]}]} self.assertRaises(jsonschema.ValidationError, self.query._validate_filter, filter) def test_convert_orderby(self): orderby = [] self.query._convert_orderby_to_lower_case(orderby) self.assertEqual([], orderby) orderby = [{"project_id": "DESC"}] self.query._convert_orderby_to_lower_case(orderby) self.assertEqual([{"project_id": "desc"}], orderby) orderby = [{"project_id": "ASC"}, {"resource_id": "DESC"}] self.query._convert_orderby_to_lower_case(orderby) self.assertEqual([{"project_id": "asc"}, {"resource_id": "desc"}], orderby) def test_validate_orderby_empty_direction(self): orderby = [{"project_id": ""}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) orderby = [{"project_id": "asc"}, {"resource_id": ""}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_wrong_order_string(self): orderby = [{"project_id": "not a valid order"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_wrong_multiple_item_order_string(self): orderby = [{"project_id": "not a valid order"}, {"resource_id": "ASC"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_empty_field_name(self): orderby = [{"": "ASC"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) orderby = [{"project_id": "asc"}, {"": "desc"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_wrong_field_name(self): orderby = [{"project_id11": "ASC"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_wrong_field_name_multiple_item_orderby(self): orderby = [{"project_id": "asc"}, {"resource_id11": "ASC"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) def test_validate_orderby_metadata_is_not_allowed(self): orderby = [{"metadata.display_name": "asc"}] self.assertRaises(jsonschema.ValidationError, self.query._validate_orderby, orderby) aodh-2.0.6/aodh/tests/functional/api/v2/test_query.py0000664000567000056710000003362513076064371023710 0ustar jenkinsjenkins00000000000000# Copyright 2013 OpenStack Foundation. # All Rights Reserved. # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test the methods related to query.""" import datetime import fixtures import mock from oslo_utils import timeutils from oslotest import base from oslotest import mockpatch import wsme from aodh.api.controllers.v2 import base as v2_base from aodh.api.controllers.v2 import utils from aodh import storage from aodh.storage import base as alarm_storage_base from aodh.tests import base as tests_base class TestQuery(base.BaseTestCase): def setUp(self): super(TestQuery, self).setUp() self.useFixture(fixtures.MonkeyPatch( 'pecan.response', mock.MagicMock())) def test_get_value_as_type_with_integer(self): query = v2_base.Query(field='metadata.size', op='eq', value='123', type='integer') expected = 123 self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_float(self): query = v2_base.Query(field='metadata.size', op='eq', value='123.456', type='float') expected = 123.456 self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_boolean(self): query = v2_base.Query(field='metadata.is_public', op='eq', value='True', type='boolean') expected = True self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_string(self): query = v2_base.Query(field='metadata.name', op='eq', value='linux', type='string') expected = 'linux' self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_datetime(self): query = v2_base.Query(field='metadata.date', op='eq', value='2014-01-01T05:00:00', type='datetime') self.assertIsInstance(query._get_value_as_type(), datetime.datetime) self.assertIsNone(query._get_value_as_type().tzinfo) def test_get_value_as_type_with_integer_without_type(self): query = v2_base.Query(field='metadata.size', op='eq', value='123') expected = 123 self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_float_without_type(self): query = v2_base.Query(field='metadata.size', op='eq', value='123.456') expected = 123.456 self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_boolean_without_type(self): query = v2_base.Query(field='metadata.is_public', op='eq', value='True') expected = True self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_string_without_type(self): query = v2_base.Query(field='metadata.name', op='eq', value='linux') expected = 'linux' self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_bad_type(self): query = v2_base.Query(field='metadata.size', op='eq', value='123.456', type='blob') self.assertRaises(wsme.exc.ClientSideError, query._get_value_as_type) def test_get_value_as_type_with_bad_value(self): query = v2_base.Query(field='metadata.size', op='eq', value='fake', type='integer') self.assertRaises(wsme.exc.ClientSideError, query._get_value_as_type) def test_get_value_as_type_integer_expression_without_type(self): # bug 1221736 query = v2_base.Query(field='should_be_a_string', op='eq', value='WWW-Layer-4a80714f') expected = 'WWW-Layer-4a80714f' self.assertEqual(expected, query._get_value_as_type()) def 
test_get_value_as_type_boolean_expression_without_type(self): # bug 1221736 query = v2_base.Query(field='should_be_a_string', op='eq', value='True or False') expected = 'True or False' self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_syntax_error(self): # bug 1221736 value = 'WWW-Layer-4a80714f-0232-4580-aa5e-81494d1a4147-uolhh25p5xxm' query = v2_base.Query(field='group_id', op='eq', value=value) expected = value self.assertEqual(expected, query._get_value_as_type()) def test_get_value_as_type_with_syntax_error_colons(self): # bug 1221736 value = 'Ref::StackId' query = v2_base.Query(field='field_name', op='eq', value=value) expected = value self.assertEqual(expected, query._get_value_as_type()) class TestQueryToKwArgs(tests_base.BaseTestCase): def setUp(self): super(TestQueryToKwArgs, self).setUp() self.useFixture(mockpatch.PatchObject( utils, 'sanitize_query', side_effect=lambda x, y, **z: x)) self.useFixture(mockpatch.PatchObject( utils, '_verify_query_segregation', side_effect=lambda x, **z: x)) def test_sample_filter_single(self): q = [v2_base.Query(field='user_id', op='eq', value='uid')] kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) self.assertIn('user', kwargs) self.assertEqual(1, len(kwargs)) self.assertEqual('uid', kwargs['user']) def test_sample_filter_multi(self): q = [v2_base.Query(field='user_id', op='eq', value='uid'), v2_base.Query(field='project_id', op='eq', value='pid'), v2_base.Query(field='resource_id', op='eq', value='rid'), v2_base.Query(field='source', op='eq', value='source_name'), v2_base.Query(field='meter', op='eq', value='meter_name')] kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) self.assertEqual(5, len(kwargs)) self.assertEqual('uid', kwargs['user']) self.assertEqual('pid', kwargs['project']) self.assertEqual('rid', kwargs['resource']) self.assertEqual('source_name', kwargs['source']) self.assertEqual('meter_name', kwargs['meter']) def test_sample_filter_timestamp(self): ts_start = timeutils.utcnow() ts_end = ts_start + datetime.timedelta(minutes=5) q = [v2_base.Query(field='timestamp', op='lt', value=str(ts_end)), v2_base.Query(field='timestamp', op='gt', value=str(ts_start))] kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__) self.assertEqual(4, len(kwargs)) self.assertTimestampEqual(kwargs['start_timestamp'], ts_start) self.assertTimestampEqual(kwargs['end_timestamp'], ts_end) self.assertEqual('gt', kwargs['start_timestamp_op']) self.assertEqual('lt', kwargs['end_timestamp_op']) def test_sample_filter_non_equality_on_metadata(self): queries = [v2_base.Query(field='resource_metadata.image_id', op='gt', value='image', type='string'), v2_base.Query(field='metadata.ramdisk_id', op='le', value='ramdisk', type='string')] with mock.patch('pecan.request') as request: request.headers.return_value = {'X-ProjectId': 'foobar'} self.assertRaises( wsme.exc.InvalidInput, utils.query_to_kwargs, queries, storage.SampleFilter.__init__) def test_sample_filter_invalid_field(self): q = [v2_base.Query(field='invalid', op='eq', value='20')] self.assertRaises( wsme.exc.UnknownArgument, utils.query_to_kwargs, q, storage.SampleFilter.__init__) def test_sample_filter_invalid_op(self): q = [v2_base.Query(field='user_id', op='lt', value='20')] self.assertRaises( wsme.exc.InvalidInput, utils.query_to_kwargs, q, storage.SampleFilter.__init__) def test_sample_filter_timestamp_invalid_op(self): ts_start = timeutils.utcnow() q = [v2_base.Query(field='timestamp', op='eq', value=str(ts_start))] 
self.assertRaises( wsme.exc.InvalidInput, utils.query_to_kwargs, q, storage.SampleFilter.__init__) def test_sample_filter_exclude_internal(self): queries = [v2_base.Query(field=f, op='eq', value='fake', type='string') for f in ['y', 'on_behalf_of', 'x']] with mock.patch('pecan.request') as request: request.headers.return_value = {'X-ProjectId': 'foobar'} self.assertRaises(wsme.exc.ClientSideError, utils.query_to_kwargs, queries, storage.SampleFilter.__init__, internal_keys=['on_behalf_of']) def test_sample_filter_self_always_excluded(self): queries = [v2_base.Query(field='user_id', op='eq', value='20')] with mock.patch('pecan.request') as request: request.headers.return_value = {'X-ProjectId': 'foobar'} kwargs = utils.query_to_kwargs(queries, storage.SampleFilter.__init__) self.assertNotIn('self', kwargs) def test_sample_filter_translation(self): queries = [v2_base.Query(field=f, op='eq', value='fake_%s' % f, type='string') for f in ['user_id', 'project_id', 'resource_id']] with mock.patch('pecan.request') as request: request.headers.return_value = {'X-ProjectId': 'foobar'} kwargs = utils.query_to_kwargs(queries, storage.SampleFilter.__init__) for o in ['user', 'project', 'resource']: self.assertEqual('fake_%s_id' % o, kwargs.get(o)) def test_timestamp_validation(self): q = [v2_base.Query(field='timestamp', op='le', value='123')] exc = self.assertRaises( wsme.exc.InvalidInput, utils.query_to_kwargs, q, storage.SampleFilter.__init__) expected_exc = wsme.exc.InvalidInput('timestamp', '123', 'invalid timestamp format') self.assertEqual(str(expected_exc), str(exc)) def test_get_alarm_changes_filter_valid_fields(self): q = [v2_base.Query(field='abc', op='eq', value='abc')] exc = self.assertRaises( wsme.exc.UnknownArgument, utils.query_to_kwargs, q, alarm_storage_base.Connection.get_alarm_changes) valid_keys = ['alarm_id', 'on_behalf_of', 'project', 'search_offset', 'severity', 'timestamp', 'type', 'user'] msg = ("unrecognized field in query: %s, " "valid keys: %s") % (q, valid_keys) expected_exc = wsme.exc.UnknownArgument('abc', msg) self.assertEqual(str(expected_exc), str(exc)) def test_get_alarms_filter_valid_fields(self): q = [v2_base.Query(field='abc', op='eq', value='abc')] exc = self.assertRaises( wsme.exc.UnknownArgument, utils.query_to_kwargs, q, alarm_storage_base.Connection.get_alarms) valid_keys = ['alarm_id', 'enabled', 'exclude', 'meter', 'name', 'project', 'severity', 'state', 'type', 'user'] msg = ("unrecognized field in query: %s, " "valid keys: %s") % (q, valid_keys) expected_exc = wsme.exc.UnknownArgument('abc', msg) self.assertEqual(str(expected_exc), str(exc)) aodh-2.0.6/aodh/tests/functional/hooks/0000775000567000056710000000000013076064720021142 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/functional/hooks/post_test_hook.sh0000775000567000056710000000455213076064371024555 0ustar jenkinsjenkins00000000000000#!/bin/bash -xe # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This script is executed inside post_test_hook function in devstack gate. 
set -e

function generate_testr_results {
    if [ -f .testrepository/0 ]; then
        sudo .tox/functional/bin/testr last --subunit > $WORKSPACE/testrepository.subunit
        sudo mv $WORKSPACE/testrepository.subunit $BASE/logs/testrepository.subunit
        sudo /usr/os-testr-env/bin/subunit2html $BASE/logs/testrepository.subunit $BASE/logs/testr_results.html
        sudo gzip -9 $BASE/logs/testrepository.subunit
        sudo gzip -9 $BASE/logs/testr_results.html
        sudo chown jenkins:jenkins $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz
        sudo chmod a+r $BASE/logs/testrepository.subunit.gz $BASE/logs/testr_results.html.gz
    fi
}

# If we're running in the gate find our keystone endpoint to give to
# gabbi tests and do a chown. Otherwise the existing environment
# should provide URL and TOKEN.
if [ -d $BASE/new/devstack ]; then
    export AODH_DIR="$BASE/new/aodh"
    STACK_USER=stack
    sudo chown -R $STACK_USER:stack $AODH_DIR
    source $BASE/new/devstack/openrc admin admin
    if [ $OS_IDENTITY_API_VERSION == '2.0' ]; then
        urltag='publicURL'
    else
        urltag='public'
    fi
    openstack catalog list
    export AODH_SERVICE_URL=$(openstack catalog show alarming -c endpoints -f value | awk "/$urltag"'/{print $2}')
    export AODH_SERVICE_TOKEN=$(openstack token issue -c id -f value)
    # Go to the aodh dir
    cd $AODH_DIR
fi

# Run tests
echo "Running aodh functional test suite"
set +e
# NOTE(ityaptin) Expect a script param which contains at least one backend name
AODH_TEST_BACKEND="${1:?test backend required}"
sudo -E -H -u ${STACK_USER:-${USER}} tox -efunctional
EXIT_CODE=$?
set -e

# Collect and parse result
if [ -n "$AODH_DIR" ]; then
    generate_testr_results
fi
exit $EXIT_CODE
aodh-2.0.6/aodh/tests/functional/__init__.py0000664000567000056710000000000013076064371022120 0ustar jenkinsjenkins00000000000000
aodh-2.0.6/aodh/tests/functional/storage/0000775000567000056710000000000013076064720021463 5ustar jenkinsjenkins00000000000000
aodh-2.0.6/aodh/tests/functional/storage/test_impl_mongodb.py0000664000567000056710000000760713076064372025553 0ustar jenkinsjenkins00000000000000
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for aodh/storage/impl_mongodb.py

.. note::
  In order to run the tests against another MongoDB server set the
  environment variable aodh_TEST_MONGODB_URL to point to a MongoDB
  server before running the tests.
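
  For example (the connection URL below is only an illustrative
  placeholder, not a value shipped with aodh)::

      export aodh_TEST_MONGODB_URL="mongodb://localhost:27017/aodh"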
""" import unittest try: from aodh.storage import impl_mongodb except ImportError: impl_mongodb = None from aodh.tests import base as test_base from aodh.tests.functional import db as tests_db @unittest.skipUnless(impl_mongodb, "pymongo not available") @tests_db.run_with('mongodb') class MongoDBConnection(tests_db.TestBase): def test_connection_pooling(self): test_conn = impl_mongodb.Connection(self.CONF, self.CONF.database.connection) self.assertEqual(self.alarm_conn.conn, test_conn.conn) def test_replica_set(self): url = self.CONF.database.connection + '?replicaSet=foobar' conn = impl_mongodb.Connection(self.CONF, url) self.assertTrue(conn.conn) @unittest.skipUnless(impl_mongodb, "pymongo not available") @tests_db.run_with('mongodb') class IndexTest(tests_db.TestBase): def _test_ttl_index_absent(self, conn, coll_name, ttl_opt): # create a fake index and check it is deleted coll = getattr(conn.db, coll_name) index_name = '%s_ttl' % coll_name self.CONF.set_override(ttl_opt, -1, group='database', enforce_type=True) conn.upgrade() self.assertNotIn(index_name, coll.index_information()) self.CONF.set_override(ttl_opt, 456789, group='database', enforce_type=True) conn.upgrade() self.assertEqual(456789, coll.index_information() [index_name]['expireAfterSeconds']) def test_alarm_history_ttl_index_absent(self): self._test_ttl_index_absent(self.alarm_conn, 'alarm_history', 'alarm_history_time_to_live') def _test_ttl_index_present(self, conn, coll_name, ttl_opt): coll = getattr(conn.db, coll_name) self.CONF.set_override(ttl_opt, 456789, group='database', enforce_type=True) conn.upgrade() index_name = '%s_ttl' % coll_name self.assertEqual(456789, coll.index_information() [index_name]['expireAfterSeconds']) self.CONF.set_override(ttl_opt, -1, group='database', enforce_type=True) conn.upgrade() self.assertNotIn(index_name, coll.index_information()) def test_alarm_history_ttl_index_present(self): self._test_ttl_index_present(self.alarm_conn, 'alarm_history', 'alarm_history_time_to_live') class CapabilitiesTest(test_base.BaseTestCase): @unittest.skipUnless(impl_mongodb, "pymongo not available") def test_alarm_capabilities(self): expected_capabilities = { 'alarms': {'query': {'simple': True, 'complex': True}, 'history': {'query': {'simple': True, 'complex': True}}}, } actual_capabilities = impl_mongodb.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) aodh-2.0.6/aodh/tests/functional/storage/test_data_migration.py0000664000567000056710000001247513076064372026072 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import uuid import mock from oslo_config import fixture as fixture_config from aodh.cmd import data_migration from aodh import service from aodh import storage from aodh.storage import models as alarm_models from aodh.tests.functional import db as tests_db from aodh.tests.functional.storage import test_storage_scenarios @tests_db.run_with('hbase', 'mongodb') class TestDataMigration(test_storage_scenarios.AlarmTestBase): def setUp(self): sql_conf = service.prepare_service(argv=[], config_files=[]) self.sql_conf = self.useFixture(fixture_config.Config(sql_conf)).conf # using sqlite to represent the type of SQL dbs self.sql_conf.set_override('connection', "sqlite://", group="database", enforce_type=True) self.sql_namager = tests_db.SQLiteManager(self.sql_conf) self.useFixture(self.sql_namager) self.sql_conf.set_override('connection', self.sql_namager.url, group="database", enforce_type=True) self.sql_alarm_conn = storage.get_connection_from_config(self.sql_conf) self.sql_alarm_conn.upgrade() super(TestDataMigration, self).setUp() self.add_some_alarms() self._add_some_alarm_changes() def tearDown(self): self.sql_alarm_conn.clear() self.sql_alarm_conn = None super(TestDataMigration, self).tearDown() def _add_some_alarm_changes(self): alarms = list(self.alarm_conn.get_alarms()) i = 0 for alarm in alarms: for change_type in [alarm_models.AlarmChange.CREATION, alarm_models.AlarmChange.RULE_CHANGE, alarm_models.AlarmChange.STATE_TRANSITION, alarm_models.AlarmChange.STATE_TRANSITION, alarm_models.AlarmChange.STATE_TRANSITION]: alarm_change = { "event_id": str(uuid.uuid4()), "alarm_id": alarm.alarm_id, "type": change_type, "detail": "detail %s" % alarm.name, "user_id": alarm.user_id, "project_id": alarm.project_id, "on_behalf_of": alarm.project_id, "timestamp": datetime.datetime(2014, 4, 7, 7, 30 + i) } self.alarm_conn.record_alarm_change(alarm_change=alarm_change) i += 1 def test_data_migration_without_history_data(self): alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(3, len(alarms)) alarms_sql = list(self.sql_alarm_conn.get_alarms()) self.assertEqual(0, len(alarms_sql)) test_args = data_migration.get_parser().parse_args( ['--sql-conn', 'sqlite://', '--nosql-conn', self.CONF.database.connection, '--migrate-history', False]) with mock.patch('argparse.ArgumentParser.parse_args') as args_parser: # because get_connection_from_config has been mocked in # aodh.tests.functional.db.TestBase#setUp, here re-mocked it that # this test can get nosql and sql storage connections with mock.patch('aodh.storage.get_connection_from_config') as conn: conn.side_effect = [self.alarm_conn, self.sql_alarm_conn] args_parser.return_value = test_args data_migration.main() alarms_sql = list(self.sql_alarm_conn.get_alarms()) alarm_changes = list(self.sql_alarm_conn.query_alarm_history()) self.assertEqual(0, len(alarm_changes)) self.assertEqual(3, len(alarms_sql)) self.assertEqual(sorted([a.alarm_id for a in alarms]), sorted([a.alarm_id for a in alarms_sql])) def test_data_migration_with_history_data(self): test_args = data_migration.get_parser().parse_args( ['--sql-conn', 'sqlite://', '--nosql-conn', self.CONF.database.connection]) with mock.patch('argparse.ArgumentParser.parse_args') as args_parser: # because get_connection_from_config has been mocked in # aodh.tests.functional.db.TestBase#setUp, here re-mocked it that # this test can get nosql and sql storage connections with mock.patch('aodh.storage.get_connection_from_config') as conn: conn.side_effect = [self.alarm_conn, self.sql_alarm_conn] 
args_parser.return_value = test_args data_migration.main() alarms_sql = list(self.sql_alarm_conn.get_alarms()) self.assertEqual(3, len(alarms_sql)) for alarm in alarms_sql: changes = list(self.sql_alarm_conn.get_alarm_changes( alarm.alarm_id, alarm.project_id)) self.assertEqual(5, len(changes)) aodh-2.0.6/aodh/tests/functional/storage/test_impl_hbase.py0000664000567000056710000000441513076064372025206 0ustar jenkinsjenkins00000000000000# # Copyright 2012, 2013 Dell Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for aodh/storage/impl_hbase.py .. note:: In order to run the tests against real HBase server set the environment variable aodh_TEST_HBASE_URL to point to that HBase instance before running the tests. Make sure the Thrift server is running on that server. """ import mock try: import happybase # noqa except ImportError: import testtools.testcase raise testtools.testcase.TestSkipped("happybase is needed") from aodh.storage import impl_hbase from aodh.tests import base as test_base from aodh.tests.functional import db as tests_db class ConnectionTest(tests_db.TestBase): @tests_db.run_with('hbase') def test_hbase_connection(self): class TestConn(object): def __init__(self, host, port): self.netloc = '%s:%s' % (host, port) def open(self): pass def get_connection_pool(conf): return TestConn(conf['host'], conf['port']) with mock.patch.object(impl_hbase.Connection, '_get_connection_pool', side_effect=get_connection_pool): conn = impl_hbase.Connection(self.CONF, 'hbase://test_hbase:9090') self.assertIsInstance(conn.conn_pool, TestConn) class CapabilitiesTest(test_base.BaseTestCase): def test_alarm_capabilities(self): expected_capabilities = { 'alarms': {'query': {'simple': True, 'complex': False}, 'history': {'query': {'simple': True, 'complex': False}}}, } actual_capabilities = impl_hbase.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) aodh-2.0.6/aodh/tests/functional/storage/test_get_connection.py0000664000567000056710000000724513076064372026105 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for aodh/storage/ """ import mock from oslo_config import fixture as fixture_config from oslotest import base import retrying from aodh import service from aodh import storage from aodh.storage import impl_log import six class EngineTest(base.BaseTestCase): def setUp(self): super(EngineTest, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf def test_get_connection(self): self.CONF.set_override('connection', 'log://localhost', group='database', enforce_type=True) engine = storage.get_connection_from_config(self.CONF) self.assertIsInstance(engine, impl_log.Connection) def test_get_connection_no_such_engine(self): self.CONF.set_override('connection', 'no-such-engine://localhost', group='database', enforce_type=True) self.CONF.set_override('max_retries', 0, 'database', enforce_type=True) try: storage.get_connection_from_config(self.CONF) except RuntimeError as err: self.assertIn('no-such-engine', six.text_type(err)) class ConnectionRetryTest(base.BaseTestCase): def setUp(self): super(ConnectionRetryTest, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf def test_retries(self): max_retries = 5 with mock.patch.object( retrying.Retrying, 'should_reject') as retry_reject: with mock.patch.object( storage.impl_log.Connection, '__init__') as log_init: class ConnectionError(Exception): pass def x(a, b): raise ConnectionError log_init.side_effect = x self.CONF.set_override("connection", "log://", "database", enforce_type=True) self.CONF.set_override("retry_interval", 0.00001, "database", enforce_type=True) self.CONF.set_override("max_retries", max_retries, "database", enforce_type=True) self.assertRaises(ConnectionError, storage.get_connection_from_config, self.CONF) self.assertEqual(max_retries, retry_reject.call_count) class ConnectionConfigTest(base.BaseTestCase): def setUp(self): super(ConnectionConfigTest, self).setUp() conf = service.prepare_service(argv=[], config_files=[]) self.CONF = self.useFixture(fixture_config.Config(conf)).conf def test_only_default_url(self): self.CONF.set_override("connection", "log://", group="database", enforce_type=True) conn = storage.get_connection_from_config(self.CONF) self.assertIsInstance(conn, impl_log.Connection) aodh-2.0.6/aodh/tests/functional/storage/test_impl_sqlalchemy.py0000664000567000056710000000252713076064371026267 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for aodh/storage/impl_sqlalchemy.py .. note:: In order to run the tests against real SQL server set the environment variable aodh_TEST_SQL_URL to point to a SQL server before running the tests. 
""" from aodh.storage import impl_sqlalchemy as impl_sqla_alarm from aodh.tests import base as test_base class CapabilitiesTest(test_base.BaseTestCase): def test_alarm_capabilities(self): expected_capabilities = { 'alarms': {'query': {'simple': True, 'complex': True}, 'history': {'query': {'simple': True, 'complex': True}}}, } actual_capabilities = impl_sqla_alarm.Connection.get_capabilities() self.assertEqual(expected_capabilities, actual_capabilities) aodh-2.0.6/aodh/tests/functional/storage/__init__.py0000664000567000056710000000000013076064371023564 0ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/functional/storage/test_storage_scenarios.py0000664000567000056710000005301613076064372026616 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Intel Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Base classes for DB backend implementation test """ import datetime import mock from oslo_utils import timeutils from aodh.storage import models as alarm_models from aodh.tests import constants from aodh.tests.functional import db as tests_db class DBTestBase(tests_db.TestBase): @staticmethod def create_side_effect(method, exception_type, test_exception): def side_effect(*args, **kwargs): if test_exception.pop(): raise exception_type else: return method(*args, **kwargs) return side_effect def setUp(self): super(DBTestBase, self).setUp() patcher = mock.patch.object(timeutils, 'utcnow') self.addCleanup(patcher.stop) self.mock_utcnow = patcher.start() self.mock_utcnow.return_value = datetime.datetime(2015, 7, 2, 10, 39) class AlarmTestBase(DBTestBase): def add_some_alarms(self): alarms = [alarm_models.Alarm(alarm_id='r3d', enabled=True, type='threshold', name='red-alert', description='my red-alert', timestamp=datetime.datetime(2015, 7, 2, 10, 25), user_id='me', project_id='and-da-boys', state="insufficient data", state_timestamp=constants.MIN_DATETIME, ok_actions=[], alarm_actions=['http://nowhere/alarms'], insufficient_data_actions=[], repeat_actions=False, time_constraints=[dict(name='testcons', start='0 11 * * *', duration=300)], rule=dict(comparison_operator='eq', threshold=36, statistic='count', evaluation_periods=1, period=60, meter_name='test.one', query=[{'field': 'key', 'op': 'eq', 'value': 'value', 'type': 'string'}]), ), alarm_models.Alarm(alarm_id='0r4ng3', enabled=True, type='threshold', name='orange-alert', description='a orange', timestamp=datetime.datetime(2015, 7, 2, 10, 40), user_id='me', project_id='and-da-boys', state="insufficient data", state_timestamp=constants.MIN_DATETIME, ok_actions=[], alarm_actions=['http://nowhere/alarms'], insufficient_data_actions=[], repeat_actions=False, time_constraints=[], rule=dict(comparison_operator='gt', threshold=75, statistic='avg', evaluation_periods=1, period=60, meter_name='test.forty', query=[{'field': 'key2', 'op': 'eq', 'value': 'value2', 'type': 'string'}]), ), alarm_models.Alarm(alarm_id='y3ll0w', enabled=False, type='threshold', name='yellow-alert', description='yellow', timestamp=datetime.datetime(2015, 7, 2, 10, 10), 
user_id='me', project_id='and-da-boys', state="insufficient data", state_timestamp=constants.MIN_DATETIME, ok_actions=[], alarm_actions=['http://nowhere/alarms'], insufficient_data_actions=[], repeat_actions=False, time_constraints=[], rule=dict(comparison_operator='lt', threshold=10, statistic='min', evaluation_periods=1, period=60, meter_name='test.five', query=[{'field': 'key2', 'op': 'eq', 'value': 'value2', 'type': 'string'}, {'field': 'user_metadata.key3', 'op': 'eq', 'value': 'value3', 'type': 'string'}]), )] for a in alarms: self.alarm_conn.create_alarm(a) class AlarmTest(AlarmTestBase): def test_empty(self): alarms = list(self.alarm_conn.get_alarms()) self.assertEqual([], alarms) def test_list(self): self.add_some_alarms() alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(3, len(alarms)) def test_list_ordered_by_timestamp(self): self.add_some_alarms() alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(len(alarms), 3) alarm_l = [a.timestamp for a in alarms] alarm_l_ordered = [datetime.datetime(2015, 7, 2, 10, 40), datetime.datetime(2015, 7, 2, 10, 25), datetime.datetime(2015, 7, 2, 10, 10)] self.assertEqual(alarm_l_ordered, alarm_l) def test_list_enabled(self): self.add_some_alarms() alarms = list(self.alarm_conn.get_alarms(enabled=True)) self.assertEqual(2, len(alarms)) def test_list_disabled(self): self.add_some_alarms() alarms = list(self.alarm_conn.get_alarms(enabled=False)) self.assertEqual(1, len(alarms)) def test_list_by_type(self): self.add_some_alarms() alarms = list(self.alarm_conn.get_alarms(alarm_type='threshold')) self.assertEqual(3, len(alarms)) alarms = list(self.alarm_conn.get_alarms(alarm_type='combination')) self.assertEqual(0, len(alarms)) def test_list_excluded_by_name(self): self.add_some_alarms() exclude = {'name': 'yellow-alert'} alarms = list(self.alarm_conn.get_alarms(exclude=exclude)) self.assertEqual(2, len(alarms)) alarm_names = sorted([a.name for a in alarms]) self.assertEqual(['orange-alert', 'red-alert'], alarm_names) def test_add(self): self.add_some_alarms() alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(3, len(alarms)) meter_names = sorted([a.rule['meter_name'] for a in alarms]) self.assertEqual(['test.five', 'test.forty', 'test.one'], meter_names) def test_update(self): self.add_some_alarms() orange = list(self.alarm_conn.get_alarms(name='orange-alert'))[0] orange.enabled = False orange.state = alarm_models.Alarm.ALARM_INSUFFICIENT_DATA query = [{'field': 'metadata.group', 'op': 'eq', 'value': 'test.updated', 'type': 'string'}] orange.rule['query'] = query orange.rule['meter_name'] = 'new_meter_name' updated = self.alarm_conn.update_alarm(orange) self.assertFalse(updated.enabled) self.assertEqual(alarm_models.Alarm.ALARM_INSUFFICIENT_DATA, updated.state) self.assertEqual(query, updated.rule['query']) self.assertEqual('new_meter_name', updated.rule['meter_name']) def test_update_llu(self): llu = alarm_models.Alarm(alarm_id='llu', enabled=True, type='threshold', name='llu', description='llu', timestamp=constants.MIN_DATETIME, user_id='bla', project_id='ffo', state="insufficient data", state_timestamp=constants.MIN_DATETIME, ok_actions=[], alarm_actions=[], insufficient_data_actions=[], repeat_actions=False, time_constraints=[], rule=dict(comparison_operator='lt', threshold=34, statistic='max', evaluation_periods=1, period=60, meter_name='llt', query=[]) ) updated = self.alarm_conn.update_alarm(llu) updated.state = alarm_models.Alarm.ALARM_OK updated.description = ':)' self.alarm_conn.update_alarm(updated) all = 
list(self.alarm_conn.get_alarms()) self.assertEqual(1, len(all)) def test_delete(self): self.add_some_alarms() victim = list(self.alarm_conn.get_alarms(name='orange-alert'))[0] self.alarm_conn.delete_alarm(victim.alarm_id) survivors = list(self.alarm_conn.get_alarms()) self.assertEqual(2, len(survivors)) for s in survivors: self.assertNotEqual(victim.name, s.name) @tests_db.run_with('sqlite', 'mysql', 'pgsql', 'hbase') class AlarmHistoryTest(AlarmTestBase): def setUp(self): super(AlarmTestBase, self).setUp() self.add_some_alarms() self.prepare_alarm_history() def prepare_alarm_history(self): alarms = list(self.alarm_conn.get_alarms()) for alarm in alarms: i = alarms.index(alarm) alarm_change = { "event_id": "3e11800c-a3ca-4991-b34b-d97efb6047d%s" % i, "alarm_id": alarm.alarm_id, "type": alarm_models.AlarmChange.CREATION, "detail": "detail %s" % alarm.name, "user_id": alarm.user_id, "project_id": alarm.project_id, "on_behalf_of": alarm.project_id, "timestamp": datetime.datetime(2014, 4, 7, 7, 30 + i) } self.alarm_conn.record_alarm_change(alarm_change=alarm_change) def _clear_alarm_history(self, utcnow, ttl, count): self.mock_utcnow.return_value = utcnow self.alarm_conn.clear_expired_alarm_history_data(ttl) history = list(self.alarm_conn.query_alarm_history()) self.assertEqual(count, len(history)) def test_clear_alarm_history_no_data_to_remove(self): utcnow = datetime.datetime(2013, 4, 7, 7, 30) self._clear_alarm_history(utcnow, 1, 3) def test_clear_some_alarm_history(self): utcnow = datetime.datetime(2014, 4, 7, 7, 35) self._clear_alarm_history(utcnow, 3 * 60, 1) def test_clear_all_alarm_history(self): utcnow = datetime.datetime(2014, 4, 7, 7, 45) self._clear_alarm_history(utcnow, 3 * 60, 0) def test_delete_history_when_delete_alarm(self): alarms = list(self.alarm_conn.get_alarms()) self.assertEqual(3, len(alarms)) history = list(self.alarm_conn.query_alarm_history()) self.assertEqual(3, len(history)) for alarm in alarms: self.alarm_conn.delete_alarm(alarm.alarm_id) self.assertEqual(3, len(alarms)) history = list(self.alarm_conn.query_alarm_history()) self.assertEqual(0, len(history)) class ComplexAlarmQueryTest(AlarmTestBase): def test_no_filter(self): self.add_some_alarms() result = list(self.alarm_conn.query_alarms()) self.assertEqual(3, len(result)) def test_no_filter_with_limit(self): self.add_some_alarms() result = list(self.alarm_conn.query_alarms(limit=2)) self.assertEqual(2, len(result)) def test_filter(self): self.add_some_alarms() filter_expr = {"and": [{"or": [{"=": {"name": "yellow-alert"}}, {"=": {"name": "red-alert"}}]}, {"=": {"enabled": True}}]} result = list(self.alarm_conn.query_alarms(filter_expr=filter_expr)) self.assertEqual(1, len(result)) for a in result: self.assertIn(a.name, set(["yellow-alert", "red-alert"])) self.assertTrue(a.enabled) def test_filter_with_regexp(self): self.add_some_alarms() filter_expr = {"and": [{"or": [{"=": {"name": "yellow-alert"}}, {"=": {"name": "red-alert"}}]}, {"=~": {"description": "yel.*"}}]} result = list(self.alarm_conn.query_alarms(filter_expr=filter_expr)) self.assertEqual(1, len(result)) for a in result: self.assertEqual("yellow", a.description) def test_filter_for_alarm_id(self): self.add_some_alarms() filter_expr = {"=": {"alarm_id": "0r4ng3"}} result = list(self.alarm_conn.query_alarms(filter_expr=filter_expr)) self.assertEqual(1, len(result)) for a in result: self.assertEqual("0r4ng3", a.alarm_id) def test_filter_and_orderby(self): self.add_some_alarms() result = list(self.alarm_conn.query_alarms(filter_expr=( {"=": 
{"enabled": True}}), orderby=[{"name": "asc"}])) self.assertEqual(2, len(result)) self.assertEqual(["orange-alert", "red-alert"], [a.name for a in result]) for a in result: self.assertTrue(a.enabled) class ComplexAlarmHistoryQueryTest(AlarmTestBase): def setUp(self): super(DBTestBase, self).setUp() self.filter_expr = {"and": [{"or": [{"=": {"type": "rule change"}}, {"=": {"type": "state transition"}}]}, {"=": {"alarm_id": "0r4ng3"}}]} self.add_some_alarms() self.prepare_alarm_history() def prepare_alarm_history(self): alarms = list(self.alarm_conn.get_alarms()) name_index = { 'red-alert': 0, 'orange-alert': 1, 'yellow-alert': 2 } for alarm in alarms: i = name_index[alarm.name] alarm_change = dict(event_id=( "16fd2706-8baf-433b-82eb-8c7fada847c%s" % i), alarm_id=alarm.alarm_id, type=alarm_models.AlarmChange.CREATION, detail="detail %s" % alarm.name, user_id=alarm.user_id, project_id=alarm.project_id, on_behalf_of=alarm.project_id, timestamp=datetime.datetime(2012, 9, 24, 7 + i, 30 + i)) self.alarm_conn.record_alarm_change(alarm_change=alarm_change) alarm_change2 = dict(event_id=( "16fd2706-8baf-433b-82eb-8c7fada847d%s" % i), alarm_id=alarm.alarm_id, type=alarm_models.AlarmChange.RULE_CHANGE, detail="detail %s" % i, user_id=alarm.user_id, project_id=alarm.project_id, on_behalf_of=alarm.project_id, timestamp=datetime.datetime(2012, 9, 25, 10 + i, 30 + i)) self.alarm_conn.record_alarm_change(alarm_change=alarm_change2) alarm_change3 = dict( event_id="16fd2706-8baf-433b-82eb-8c7fada847e%s" % i, alarm_id=alarm.alarm_id, type=alarm_models.AlarmChange.STATE_TRANSITION, detail="detail %s" % (i + 1), user_id=alarm.user_id, project_id=alarm.project_id, on_behalf_of=alarm.project_id, timestamp=datetime.datetime(2012, 9, 26, 10 + i, 30 + i) ) if alarm.name == "red-alert": alarm_change3['on_behalf_of'] = 'and-da-girls' self.alarm_conn.record_alarm_change(alarm_change=alarm_change3) def test_alarm_history_with_no_filter(self): history = list(self.alarm_conn.query_alarm_history()) self.assertEqual(9, len(history)) def test_alarm_history_with_no_filter_and_limit(self): history = list(self.alarm_conn.query_alarm_history(limit=3)) self.assertEqual(3, len(history)) def test_alarm_history_with_filter(self): history = list( self.alarm_conn.query_alarm_history(filter_expr=self.filter_expr)) self.assertEqual(2, len(history)) def test_alarm_history_with_regexp(self): filter_expr = {"and": [{"=~": {"type": "(rule)|(state)"}}, {"=": {"alarm_id": "0r4ng3"}}]} history = list( self.alarm_conn.query_alarm_history(filter_expr=filter_expr)) self.assertEqual(2, len(history)) def test_alarm_history_with_filter_and_orderby(self): history = list( self.alarm_conn.query_alarm_history(filter_expr=self.filter_expr, orderby=[{"timestamp": "asc"}])) self.assertEqual([alarm_models.AlarmChange.RULE_CHANGE, alarm_models.AlarmChange.STATE_TRANSITION], [h.type for h in history]) def test_alarm_history_with_filter_and_orderby_and_limit(self): history = list( self.alarm_conn.query_alarm_history(filter_expr=self.filter_expr, orderby=[{"timestamp": "asc"}], limit=1)) self.assertEqual(alarm_models.AlarmChange.RULE_CHANGE, history[0].type) def test_alarm_history_with_on_behalf_of_filter(self): filter_expr = {"=": {"on_behalf_of": "and-da-girls"}} history = list(self.alarm_conn.query_alarm_history( filter_expr=filter_expr)) self.assertEqual(1, len(history)) self.assertEqual("16fd2706-8baf-433b-82eb-8c7fada847e0", history[0].event_id) def test_alarm_history_with_alarm_id_as_filter(self): filter_expr = {"=": {"alarm_id": "r3d"}} history = 
list(self.alarm_conn.query_alarm_history( filter_expr=filter_expr, orderby=[{"timestamp": "asc"}])) self.assertEqual(3, len(history)) self.assertEqual([alarm_models.AlarmChange.CREATION, alarm_models.AlarmChange.RULE_CHANGE, alarm_models.AlarmChange.STATE_TRANSITION], [h.type for h in history]) aodh-2.0.6/aodh/tests/functional/storage/test_impl_log.py0000664000567000056710000000147213076064371024704 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslotest import base from aodh.storage import impl_log class ConnectionTest(base.BaseTestCase): @staticmethod def test_get_connection(): impl_log.Connection(cfg.CONF, None) aodh-2.0.6/aodh/tests/functional/storage/sqlalchemy/0000775000567000056710000000000013076064720023625 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/functional/storage/sqlalchemy/test_models.py0000664000567000056710000000632113076064372026526 0ustar jenkinsjenkins00000000000000# # Copyright 2013 Rackspace Hosting # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
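# The PreciseTimestampTest cases below exercise a custom SQLAlchemy
# TypeDecorator that keeps microsecond precision by storing timestamps as
# DECIMAL(20, 6) on MySQL and as a plain DateTime everywhere else.  The
# minimal sketch that follows shows the shape of such a type under those
# assumptions; the real implementation is
# aodh.storage.sqlalchemy.models.PreciseTimestamp and may differ in detail,
# and the class name here is purely illustrative.
from sqlalchemy import DateTime
from sqlalchemy.dialects.mysql import DECIMAL as MYSQL_DECIMAL
from sqlalchemy.types import TypeDecorator


class SketchPreciseTimestamp(TypeDecorator):
    """Microsecond-precision timestamp type (illustrative sketch only)."""

    impl = DateTime

    def load_dialect_impl(self, dialect):
        if dialect.name == 'mysql':
            # Older MySQL DATETIME columns drop microseconds, so store the
            # value in a DECIMAL(20, 6) column instead.
            return dialect.type_descriptor(
                MYSQL_DECIMAL(precision=20, scale=6, asdecimal=True))
        return dialect.type_descriptor(DateTime())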
import datetime import mock from oslotest import base import sqlalchemy from sqlalchemy.dialects.mysql import DECIMAL from sqlalchemy.types import NUMERIC from aodh.storage.sqlalchemy import models class PreciseTimestampTest(base.BaseTestCase): @staticmethod def fake_dialect(name): def _type_descriptor_mock(desc): if type(desc) == DECIMAL: return NUMERIC(precision=desc.precision, scale=desc.scale) else: return desc dialect = mock.MagicMock() dialect.name = name dialect.type_descriptor = _type_descriptor_mock return dialect def setUp(self): super(PreciseTimestampTest, self).setUp() self._mysql_dialect = self.fake_dialect('mysql') self._postgres_dialect = self.fake_dialect('postgres') self._type = models.PreciseTimestamp() self._date = datetime.datetime(2012, 7, 2, 10, 44) def test_load_dialect_impl_mysql(self): result = self._type.load_dialect_impl(self._mysql_dialect) self.assertEqual(NUMERIC, type(result)) self.assertEqual(20, result.precision) self.assertEqual(6, result.scale) self.assertTrue(result.asdecimal) def test_load_dialect_impl_postgres(self): result = self._type.load_dialect_impl(self._postgres_dialect) self.assertEqual(sqlalchemy.DateTime, type(result)) def test_process_bind_param_store_datetime_postgres(self): result = self._type.process_bind_param(self._date, self._postgres_dialect) self.assertEqual(self._date, result) def test_process_bind_param_store_none_mysql(self): result = self._type.process_bind_param(None, self._mysql_dialect) self.assertIsNone(result) def test_process_bind_param_store_none_postgres(self): result = self._type.process_bind_param(None, self._postgres_dialect) self.assertIsNone(result) def test_process_result_value_datetime_postgres(self): result = self._type.process_result_value(self._date, self._postgres_dialect) self.assertEqual(self._date, result) def test_process_result_value_none_mysql(self): result = self._type.process_result_value(None, self._mysql_dialect) self.assertIsNone(result) def test_process_result_value_none_postgres(self): result = self._type.process_result_value(None, self._postgres_dialect) self.assertIsNone(result) aodh-2.0.6/aodh/tests/functional/storage/sqlalchemy/test_migrations.py0000664000567000056710000000263013076064372027416 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
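# The ModelsMigrationsSync test below plugs aodh's SQLAlchemy metadata into
# oslo.db's generic models-vs-migrations comparison.  The harness only needs
# the three hooks implemented further down in this module:
#
#   get_metadata() - the declarative Base.metadata to compare against
#   get_engine()   - a live engine for the configured test database
#   db_sync()      - how to bring that database to the latest schema
#                    (a no-op here, presumably because the surrounding
#                    TestBase fixture already prepares the schema)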
import abc import mock from oslo_db.sqlalchemy import test_migrations import six from aodh.storage.sqlalchemy import models from aodh.tests import base from aodh.tests.functional import db as tests_db class ABCSkip(base.SkipNotImplementedMeta, abc.ABCMeta): pass @tests_db.run_with('mysql', 'pgsql', 'sqlite') class ModelsMigrationsSync( six.with_metaclass(ABCSkip, tests_db.TestBase, test_migrations.ModelsMigrationsSync)): def setUp(self): super(ModelsMigrationsSync, self).setUp() self.db = mock.Mock() @staticmethod def get_metadata(): return models.Base.metadata def get_engine(self): return self.alarm_conn._engine_facade.get_engine() def db_sync(self, engine): pass aodh-2.0.6/aodh/tests/functional/storage/sqlalchemy/__init__.py0000664000567000056710000000000013076064371025726 0ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/functional/gabbi/0000775000567000056710000000000013076064720021063 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/functional/gabbi/test_gabbi_live.py0000664000567000056710000000352713076064371024570 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Red Hat. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A test module to exercise the Gnocchi API with gabbi. This is designed to run against a real running web server (started by devstack). """ import os from gabbi import driver import six.moves.urllib.parse as urlparse TESTS_DIR = 'gabbits-live' def load_tests(loader, tests, pattern): """Provide a TestSuite to the discovery process.""" aodh_url = os.getenv('AODH_SERVICE_URL') if aodh_url: parsed_url = urlparse.urlsplit(aodh_url) prefix = parsed_url.path.rstrip('/') # turn it into a prefix # NOTE(chdent): gabbi requires a port be passed or it will # default to 8001, so we must dance a little dance to get # the right ports. Probably gabbi needs to change. # https://github.com/cdent/gabbi/issues/50 port = 443 if parsed_url.scheme == 'https' else 80 if parsed_url.port: port = parsed_url.port test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) return driver.build_tests(test_dir, loader, host=parsed_url.hostname, port=port, prefix=prefix) elif os.getenv('GABBI_LIVE_FAIL_IF_NO_TEST'): raise RuntimeError('AODH_SERVICE_URL is not set') aodh-2.0.6/aodh/tests/functional/gabbi/gabbits/0000775000567000056710000000000013076064720022476 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/functional/gabbi/gabbits/capabilities.yaml0000664000567000056710000000037513076064372026023 0ustar jenkinsjenkins00000000000000# # Explore the capabilities API # fixtures: - ConfigFixture tests: - name: get capabilities desc: retrieve capabilities for the mongo store url: /v2/capabilities response_json_paths: $.alarm_storage.['storage:production_ready']: true aodh-2.0.6/aodh/tests/functional/gabbi/gabbits/middleware.yaml0000664000567000056710000000210513076064371025477 0ustar jenkinsjenkins00000000000000# # Test the middlewares. Just CORS for now. 
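#
# The requests below expect the API to trust only http://valid.example.com;
# in these gabbi runs that value comes from the CORSConfigFixture, which
# monkeypatches the allowed_origin option.  Outside the test fixtures the
# equivalent behaviour would come from an oslo.middleware CORS section in
# aodh.conf, roughly like the sketch below (values are illustrative, not
# taken from this tree):
#
#   [cors]
#   allowed_origin = http://valid.example.com
#   allow_methods = GET,POST,PUT,DELETE,OPTIONS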
# fixtures: - ConfigFixture - CORSConfigFixture tests: - name: valid cors options OPTIONS: / status: 200 request_headers: origin: http://valid.example.com access-control-request-method: GET response_headers: access-control-allow-origin: http://valid.example.com - name: invalid cors options OPTIONS: / status: 200 request_headers: origin: http://invalid.example.com access-control-request-method: GET response_forbidden_headers: - access-control-allow-origin - name: valid cors get GET: / status: 200 request_headers: origin: http://valid.example.com access-control-request-method: GET response_headers: access-control-allow-origin: http://valid.example.com - name: invalid cors get GET: / status: 200 request_headers: origin: http://invalid.example.com response_forbidden_headers: - access-control-allow-origin aodh-2.0.6/aodh/tests/functional/gabbi/gabbits/alarms.yaml0000664000567000056710000000650713076064372024654 0ustar jenkinsjenkins00000000000000# Requests to cover the basic endpoints for alarms. fixtures: - ConfigFixture tests: - name: list alarms none desc: Lists alarms, none yet exist url: /v2/alarms method: GET response_strings: - "[]" - name: try to PUT an alarm desc: what does PUT do url: /v2/alarms method: PUT request_headers: content-type: application/json data: name: added_alarm_defaults2 type: threshold threshold_rule: meter_name: ameter threshold: 300.0 status: 405 response_headers: allow: GET, POST - name: createAlarm desc: Creates an alarm. url: /v2/alarms method: POST request_headers: content-type: application/json data: ok_actions: null name: added_alarm_defaults type: threshold threshold_rule: meter_name: ameter threshold: 300.0 status: 201 response_headers: location: /$SCHEME://$NETLOC/v2/alarms/ content-type: application/json response_json_paths: $.severity: low $.threshold_rule.threshold: 300.0 $.threshold_rule.comparison_operator: eq - name: showAlarm desc: Shows information for a specified alarm. url: /v2/alarms/$RESPONSE['$.alarm_id'] method: GET response_json_paths: $.severity: low $.alarm_id: $RESPONSE['$.alarm_id'] $.threshold_rule.threshold: 300.0 $.threshold_rule.comparison_operator: eq response_headers: content-type: application/json - name: updateAlarm desc: Updates a specified alarm. url: /v2/alarms/$RESPONSE['$.alarm_id'] method: PUT request_headers: content-type: application/json data: name: added_alarm_defaults type: threshold severity: moderate threshold_rule: meter_name: ameter threshold: 200.0 # TODO(chdent): why do we have a response, why not status: 204? # status: 204 response_json_paths: $.threshold_rule.threshold: 200.0 $.severity: moderate $.state: insufficient data - name: showAlarmHistory desc: Assembles the history for a specified alarm. url: /v2/alarms/$RESPONSE['$.alarm_id']/history?q.field=type&q.op=eq&q.value=rule%20change method: GET response_json_paths: $[0].type: rule change - name: updateAlarmState desc: Sets the state of a specified alarm. url: /v2/alarms/$RESPONSE['$[0].alarm_id']/state request_headers: content-type: application/json data: '"alarm"' method: PUT # TODO(chdent): really? Of what possible use is this? response_json_paths: $: alarm # Get a list of alarms so we can extract an id for the next test - name: list alarms for data desc: Lists alarms, only one url: /v2/alarms method: GET response_json_paths: $[0].name: added_alarm_defaults - name: showAlarmState desc: Gets the state of a specified alarm. 
url: /v2/alarms/$RESPONSE['$[0].alarm_id']/state method: GET response_headers: content-type: application/json response_json_paths: $: alarm - name: list alarms one desc: Lists alarms, only one url: /v2/alarms method: GET response_json_paths: $[0].name: added_alarm_defaults - name: deleteAlarm desc: Deletes a specified alarm. url: /v2/alarms/$RESPONSE['$[0].alarm_id'] method: DELETE status: 204 - name: list alarms none end desc: Lists alarms, none now exist url: /v2/alarms method: GET response_strings: - "[]" aodh-2.0.6/aodh/tests/functional/gabbi/gabbits/basic.yaml0000664000567000056710000000126013076064372024445 0ustar jenkinsjenkins00000000000000# # Some simple tests just to confirm that the system works. # fixtures: - ConfigFixture tests: # Root gives us some information on where to go from here. - name: quick root check url: / response_headers: content-type: application/json response_strings: - '"base": "application/json"' response_json_paths: versions.values.[0].status: stable versions.values.[0].media-types.[0].base: application/json # NOTE(chdent): Ideally since / has a links ref to /v2, /v2 ought not 404! - name: v2 visit desc: this demonstrates a bug in the info in / url: $RESPONSE['versions.values.[0].links.[0].href'] status: 404 aodh-2.0.6/aodh/tests/functional/gabbi/gabbits-live/0000775000567000056710000000000013076064720023433 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/functional/gabbi/gabbits-live/alarms.yaml0000664000567000056710000000747213076064372025613 0ustar jenkinsjenkins00000000000000defaults: request_headers: x-auth-token: $ENVIRON['AODH_SERVICE_TOKEN'] tests: - name: list alarms none desc: Lists alarms, none yet exist url: /v2/alarms method: GET response_strings: - "[]" - name: try to PUT an alarm desc: what does PUT do url: /v2/alarms method: PUT request_headers: content-type: application/json data: name: added_alarm_defaults2 type: threshold threshold_rule: meter_name: ameter threshold: 300.0 status: 405 response_headers: allow: GET, POST - name: createAlarm desc: Creates an alarm. url: /v2/alarms method: POST request_headers: content-type: application/json data: name: added_alarm_defaults type: threshold threshold_rule: meter_name: ameter threshold: 300.0 status: 201 response_headers: location: /$SCHEME://$NETLOC/v2/alarms/ content-type: application/json; charset=UTF-8 response_json_paths: $.severity: low $.threshold_rule.threshold: 300.0 $.threshold_rule.comparison_operator: eq - name: showAlarm desc: Shows information for a specified alarm. url: /v2/alarms/$RESPONSE['$.alarm_id'] method: GET response_json_paths: $.severity: low $.alarm_id: $RESPONSE['$.alarm_id'] $.threshold_rule.threshold: 300.0 $.threshold_rule.comparison_operator: eq response_headers: content-type: application/json; charset=UTF-8 - name: updateAlarm desc: Updates a specified alarm. url: /v2/alarms/$RESPONSE['$.alarm_id'] method: PUT request_headers: content-type: application/json data: name: added_alarm_defaults type: threshold severity: moderate threshold_rule: meter_name: ameter threshold: 200.0 # TODO(chdent): why do we have a response, why not status: 204? # status: 204 response_json_paths: $.threshold_rule.threshold: 200.0 $.severity: moderate $.state: insufficient data - name: showAlarmHistory desc: Assembles the history for a specified alarm. url: /v2/alarms/$RESPONSE['$.alarm_id']/history?q.field=type&q.op=eq&q.value=rule%20change method: GET response_json_paths: $[0].type: rule change - name: updateAlarmState desc: Sets the state of a specified alarm. 
url: /v2/alarms/$RESPONSE['$[0].alarm_id']/state request_headers: content-type: application/json data: '"alarm"' method: PUT # TODO(chdent): really? Of what possible use is this? response_json_paths: $: alarm # Get a list of alarms so we can extract an id for the next test - name: list alarms for data desc: Lists alarms, only one url: /v2/alarms method: GET response_json_paths: $[0].name: added_alarm_defaults - name: showAlarmState desc: Gets the state of a specified alarm. url: /v2/alarms/$RESPONSE['$[0].alarm_id']/state method: GET response_headers: content-type: application/json; charset=UTF-8 response_json_paths: $: alarm - name: list alarms one desc: Lists alarms, only one url: /v2/alarms method: GET response_json_paths: $[0].name: added_alarm_defaults - name: deleteAlarm desc: Deletes a specified alarm. url: /v2/alarms/$RESPONSE['$[0].alarm_id'] method: DELETE status: 204 - name: list alarms none end desc: Lists alarms, none now exist url: /v2/alarms method: GET response_strings: - "[]" aodh-2.0.6/aodh/tests/functional/gabbi/__init__.py0000664000567000056710000000000013076064371023164 0ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/tests/functional/gabbi/gabbi_paste.ini0000664000567000056710000000125313076064372024030 0ustar jenkinsjenkins00000000000000# aodh API WSGI Pipeline # Define the filters that make up the pipeline for processing WSGI requests # Note: This pipeline is PasteDeploy's term rather than aodh's pipeline # used for processing samples # # This is the gabbi paste file, which creates the full pipeline without the # auth middleware. # Remove authtoken from the pipeline if you don't want to use keystone authentication [pipeline:main] pipeline = cors request_id api-server [app:api-server] paste.app_factory = aodh.api.app:app_factory [filter:request_id] paste.filter_factory = oslo_middleware:RequestId.factory [filter:cors] paste.filter_factory = oslo_middleware.cors:filter_factory oslo_config_project = aodh aodh-2.0.6/aodh/tests/functional/gabbi/fixtures.py0000664000567000056710000001047113076064372023314 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Red Hat. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Fixtures used during Gabbi-based test runs.""" import os from unittest import case import uuid from gabbi import fixture import mock from oslo_config import cfg from oslo_config import fixture as fixture_config from oslo_policy import opts from six.moves.urllib import parse as urlparse from aodh import service from aodh import storage # TODO(chdent): For now only MongoDB is supported, because of easy # database name handling and intentional focus on the API, not the # data store. ENGINES = ['mongodb'] class ConfigFixture(fixture.GabbiFixture): """Establish the relevant configuration for a test run.""" def start_fixture(self): """Set up config.""" self.conf = None # Determine the database connection. 
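        # Which backend the fixture talks to is driven entirely by the
        # environment, e.g. (hypothetical value):
        #
        #   AODH_TEST_STORAGE_URL=mongodb://localhost:27017/aodh-gabbi
        #
        # OVERTEST_URL is accepted as a fallback; any URL whose scheme is
        # not listed in ENGINES above causes the gabbi tests to be skipped
        # rather than to fail.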
db_url = os.environ.get( 'AODH_TEST_STORAGE_URL', os.environ.get( "OVERTEST_URL", 'sqlite://').replace( "mysql://", "mysql+pymysql://")) if not db_url: raise case.SkipTest('No database connection configured') engine = urlparse.urlparse(db_url).scheme if engine not in ENGINES: raise case.SkipTest('Database engine not supported') conf = service.prepare_service([], config_files=[]) # NOTE(jd): prepare_service() is called twice: first by load_app() for # Pecan, then Pecan calls pastedeploy, which starts the app, which has # no way to pass the conf object so that Paste apps calls again # prepare_service. In real life, that's not a problem, but here we want # to be sure that the second time the same conf object is returned # since we tweaked it. To that, once we called prepare_service() we # mock it so it returns the same conf object. self.prepare_service = service.prepare_service service.prepare_service = mock.Mock() service.prepare_service.return_value = conf conf = fixture_config.Config(conf).conf self.conf = conf opts.set_defaults(self.conf) conf.set_override('policy_file', os.path.abspath( 'aodh/tests/open-policy.json'), group='oslo_policy', enforce_type=True) conf.set_override( 'paste_config', os.path.abspath('aodh/tests/functional/gabbi/gabbi_paste.ini'), group='api', ) database_name = '%s-%s' % (db_url, str(uuid.uuid4())) conf.set_override('connection', database_name, group='database', enforce_type=True) conf.set_override('pecan_debug', True, group='api', enforce_type=True) def stop_fixture(self): """Reset the config and remove data.""" if self.conf: storage.get_connection_from_config(self.conf).clear() self.conf.reset() service.prepare_service = self.prepare_service class CORSConfigFixture(fixture.GabbiFixture): """Inject mock configuration for the CORS middleware.""" def start_fixture(self): # Here we monkeypatch GroupAttr.__getattr__, necessary because the # paste.ini method of initializing this middleware creates its own # ConfigOpts instance, bypassing the regular config fixture. def _mock_getattr(instance, key): if key != 'allowed_origin': return self._original_call_method(instance, key) return "http://valid.example.com" self._original_call_method = cfg.ConfigOpts.GroupAttr.__getattr__ cfg.ConfigOpts.GroupAttr.__getattr__ = _mock_getattr def stop_fixture(self): """Remove the monkeypatch.""" cfg.ConfigOpts.GroupAttr.__getattr__ = self._original_call_method aodh-2.0.6/aodh/tests/functional/gabbi/test_gabbi.py0000664000567000056710000000242213076064372023543 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Red Hat. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """A test module to exercise the aodh API with gabbi For the sake of exploratory development. 
""" import os from gabbi import driver from aodh.api import app from aodh import service from aodh.tests.functional.gabbi import fixtures as fixture_module TESTS_DIR = 'gabbits' def load_tests(loader, tests, pattern): """Provide a TestSuite to the discovery process.""" test_dir = os.path.join(os.path.dirname(__file__), TESTS_DIR) return driver.build_tests(test_dir, loader, host=None, intercept=setup_app, fixture_module=fixture_module) def setup_app(): conf = service.prepare_service([]) return app.load_app(conf) aodh-2.0.6/aodh/tests/base.py0000664000567000056710000000706713076064372017156 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # # Copyright 2012 New Dream Network (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test base classes. """ import functools import os.path import oslo_messaging.conffixture from oslo_utils import timeutils from oslotest import base from oslotest import mockpatch import six from testtools import testcase import webtest import aodh from aodh import messaging class BaseTestCase(base.BaseTestCase): def setup_messaging(self, conf, exchange=None): self.useFixture(oslo_messaging.conffixture.ConfFixture(conf)) conf.set_override("notification_driver", "messaging") if not exchange: exchange = 'aodh' conf.set_override("control_exchange", exchange) # NOTE(sileht): Ensure a new oslo.messaging driver is loaded # between each tests self.transport = messaging.get_transport(conf, "fake://", cache=False) self.useFixture(mockpatch.Patch( 'aodh.messaging.get_transport', return_value=self.transport)) def assertTimestampEqual(self, first, second, msg=None): """Checks that two timestamps are equals. This relies on assertAlmostEqual to avoid rounding problem, and only checks up the first microsecond values. 
""" return self.assertAlmostEqual( timeutils.delta_seconds(first, second), 0.0, places=5) def assertIsEmpty(self, obj): try: if len(obj) != 0: self.fail("%s is not empty" % type(obj)) except (TypeError, AttributeError): self.fail("%s doesn't have length" % type(obj)) def assertIsNotEmpty(self, obj): try: if len(obj) == 0: self.fail("%s is empty" % type(obj)) except (TypeError, AttributeError): self.fail("%s doesn't have length" % type(obj)) @staticmethod def path_get(project_file=None): root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', ) ) if project_file: return os.path.join(root, project_file) else: return root def _skip_decorator(func): @functools.wraps(func) def skip_if_not_implemented(*args, **kwargs): try: return func(*args, **kwargs) except aodh.NotImplementedError as e: raise testcase.TestSkipped(six.text_type(e)) except webtest.app.AppError as e: if 'not implemented' in six.text_type(e): raise testcase.TestSkipped(six.text_type(e)) raise return skip_if_not_implemented class SkipNotImplementedMeta(type): def __new__(cls, name, bases, local): for attr in local: value = local[attr] if callable(value) and ( attr.startswith('test_') or attr == 'setUp'): local[attr] = _skip_decorator(value) return type.__new__(cls, name, bases, local) aodh-2.0.6/aodh/notifier/0000775000567000056710000000000013076064720016332 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/notifier/zaqar.py0000664000567000056710000001164113076064372020030 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Zaqar alarm notifier.""" from oslo_config import cfg from oslo_log import log import six.moves.urllib.parse as urlparse from aodh.i18n import _LE, _LI from aodh import keystone_client from aodh import notifier LOG = log.getLogger(__name__) SERVICE_OPTS = [ cfg.StrOpt('zaqar', default='messaging', help='Message queue service type.'), ] cfg.CONF.register_opts(SERVICE_OPTS, group='service_types') class ZaqarAlarmNotifier(notifier.AlarmNotifier): """Zaqar notifier.""" def __init__(self, conf): super(ZaqarAlarmNotifier, self).__init__(conf) self.conf = conf self._zclient = None def _get_endpoint(self): try: ks_client = keystone_client.get_client(self.conf) return ks_client.service_catalog.url_for( service_type=cfg.CONF.service_types.zaqar, endpoint_type=self.conf.service_credentials.os_endpoint_type) except Exception: LOG.error(_LE("Aodh was configured to use zaqar:// action," " but Zaqar endpoint could not be found in Keystone" " service catalog.")) def get_zaqar_client(self): conf = self.conf.service_credentials params = { 'auth_opts': { 'backend': 'keystone', 'options': { 'os_username': conf.os_username, 'os_password': conf.os_password, 'os_project_name': conf.os_tenant_name, 'os_auth_url': conf.os_auth_url, 'insecure': '' } } } try: from zaqarclient.queues import client as zaqar_client return zaqar_client.Client(self._get_endpoint(), version=1.1, conf=params) except Exception: LOG.error(_LE("Failed to connect to Zaqar service "), exc_info=True) def notify(self, action, alarm_id, alarm_name, severity, previous, current, reason, reason_data, headers=None): LOG.info(_LI( "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s " "priority from %(previous)s to %(current)s with action %(action)s" " because %(reason)s.") % ({'alarm_name': alarm_name, 'alarm_id': alarm_id, 'severity': severity, 'previous': previous, 'current': current, 'action': action, 'reason': reason})) body = {'alarm_name': alarm_name, 'alarm_id': alarm_id, 'severity': severity, 'previous': previous, 'current': current, 'reason': reason, 'reason_data': reason_data} message = dict(body=body) self.notify_zaqar(action, message) @property def client(self): if self._zclient is None: self._zclient = self.get_zaqar_client() return self._zclient def notify_zaqar(self, action, message): queue_info = urlparse.parse_qs(action.query) try: # queue_name is a combination of - queue_name = "%s-%s" % (message['body']['alarm_id'], queue_info.get('topic')[-1]) # create a queue in zaqar queue = self.client.queue(queue_name, force_create=True) subscriber_list = queue_info.get('subscriber', []) ttl = queue_info.get('ttl', [3600])[-1] for subscriber in subscriber_list: # add subscriber to the zaqar queue subscription_data = dict(subscriber=subscriber, ttl=ttl) self.client.subscription(queue_name, **subscription_data) # post the message to the queue queue.post(message) except IndexError: LOG.error(_LE("Required topic query option missing in action %s") % action) except Exception: LOG.error(_LE("Unknown error occurred; Failed to post message to" " Zaqar queue"), exc_info=True) aodh-2.0.6/aodh/notifier/trust.py0000664000567000056710000000347013076064372020074 0ustar jenkinsjenkins00000000000000# # Copyright 2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Rest alarm notifier with trusted authentication.""" from six.moves.urllib import parse from aodh import keystone_client from aodh.notifier import rest class TrustRestAlarmNotifier(rest.RestAlarmNotifier): """Notifier supporting keystone trust authentication. This alarm notifier is intended to be used to call an endpoint using keystone authentication. It uses the aodh service user to authenticate using the trust ID provided. The URL must be in the form trust+http://trust-id@host/action. """ def notify(self, action, alarm_id, alarm_name, severity, previous, current, reason, reason_data): trust_id = action.username client = keystone_client.get_trusted_client(self.conf, trust_id) # Remove the fake user netloc = action.netloc.split("@")[1] # Remove the trust prefix scheme = action.scheme[6:] action = parse.SplitResult(scheme, netloc, action.path, action.query, action.fragment) headers = {'X-Auth-Token': keystone_client.get_auth_token(client)} super(TrustRestAlarmNotifier, self).notify( action, alarm_id, alarm_name, severity, previous, current, reason, reason_data, headers) aodh-2.0.6/aodh/notifier/log.py0000664000567000056710000000276113076064372017476 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Log alarm notifier.""" from oslo_log import log from aodh.i18n import _ from aodh import notifier LOG = log.getLogger(__name__) class LogAlarmNotifier(notifier.AlarmNotifier): "Log alarm notifier.""" @staticmethod def notify(action, alarm_id, alarm_name, severity, previous, current, reason, reason_data): LOG.info(_( "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s " "priority from %(previous)s to %(current)s with action %(action)s" " because %(reason)s.") % ({'alarm_name': alarm_name, 'alarm_id': alarm_id, 'severity': severity, 'previous': previous, 'current': current, 'action': action, 'reason': reason})) aodh-2.0.6/aodh/notifier/__init__.py0000664000567000056710000001350113076064372020446 0ustar jenkinsjenkins00000000000000# # Copyright 2013-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
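# Every notifier in this package follows the same contract: subclass
# AlarmNotifier (defined below), implement notify(), and expose the class
# through the "aodh.notifier" entry-point namespace so that stevedore can
# load it by the URL scheme of the alarm action.  A minimal out-of-tree
# plugin could look roughly like the following sketch; the module path,
# class name and setup.cfg fragment are hypothetical examples only.
#
#     # mypackage/echo.py
#     from aodh import notifier
#
#     class EchoAlarmNotifier(notifier.AlarmNotifier):
#         """Print alarm transitions to stdout (illustrative only)."""
#
#         def notify(self, action, alarm_id, alarm_name, severity,
#                    previous, current, reason, reason_data):
#             print('%s %s: %s -> %s because %s'
#                   % (alarm_id, alarm_name, previous, current, reason))
#
#     # setup.cfg of the plugin package:
#     #   [entry_points]
#     #   aodh.notifier =
#     #       echo = mypackage.echo:EchoAlarmNotifier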
import abc import logging from oslo_config import cfg import oslo_messaging from oslo_service import service as os_service from oslo_utils import netutils import six from stevedore import extension from aodh.i18n import _ from aodh import messaging OPTS = [ cfg.StrOpt('ipc_protocol', default='queue', choices=['queue', 'rpc'], help='The protocol used to communicate between evaluator and ' 'notifier services.'), ] LOG = logging.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class AlarmNotifier(object): """Base class for alarm notifier plugins.""" @staticmethod def __init__(conf): pass @abc.abstractmethod def notify(self, action, alarm_id, alarm_name, severity, previous, current, reason, reason_data): """Notify that an alarm has been triggered. :param action: The action that is being attended, as a parsed URL. :param alarm_id: The triggered alarm. :param alarm_name: The name of triggered alarm. :param severity: The level of triggered alarm :param previous: The previous state of the alarm. :param current: The current state of the alarm. :param reason: The reason the alarm changed its state. :param reason_data: A dict representation of the reason. """ class AlarmNotifierService(os_service.Service): NOTIFIER_EXTENSIONS_NAMESPACE = "aodh.notifier" def __init__(self, conf): super(AlarmNotifierService, self).__init__() transport = messaging.get_transport(conf) self.notifiers = extension.ExtensionManager( self.NOTIFIER_EXTENSIONS_NAMESPACE, invoke_on_load=True, invoke_args=(conf,)) if conf.ipc_protocol == 'rpc': self.ipc = 'rpc' self.rpc_server = messaging.get_rpc_server( conf, transport, conf.notifier_rpc_topic, self) else: self.ipc = 'queue' target = oslo_messaging.Target(topic=conf.notifier_topic) self.listener = messaging.get_notification_listener( transport, [target], [AlarmEndpoint(self.notifiers)]) def start(self): super(AlarmNotifierService, self).start() if self.ipc == 'rpc': self.rpc_server.start() else: self.listener.start() # Add a dummy thread to have wait() working self.tg.add_timer(604800, lambda: None) def stop(self): if self.ipc == 'rpc': self.rpc_server.stop() else: self.listener.stop() self.listener.wait() super(AlarmNotifierService, self).stop() def notify_alarm(self, context, data): process_alarm(self.notifiers, data) def _handle_action(notifiers, action, alarm_id, alarm_name, severity, previous, current, reason, reason_data): """Process action on alarm :param notifiers: list of possible notifiers. :param action: The action that is being attended, as a parsed URL. :param alarm_id: The triggered alarm. :param alarm_name: The name of triggered alarm. :param severity: The level of triggered alarm :param previous: The previous state of the alarm. :param current: The current state of the alarm. :param reason: The reason the alarm changed its state. :param reason_data: A dict representation of the reason. 
""" try: action = netutils.urlsplit(action) except Exception: LOG.error( _("Unable to parse action %(action)s for alarm %(alarm_id)s"), {'action': action, 'alarm_id': alarm_id}) return try: notifier = notifiers[action.scheme].obj except KeyError: scheme = action.scheme LOG.error( _("Action %(scheme)s for alarm %(alarm_id)s is unknown, " "cannot notify"), {'scheme': scheme, 'alarm_id': alarm_id}) return try: LOG.debug("Notifying alarm %(id)s with action %(act)s", {'id': alarm_id, 'act': action}) notifier.notify(action, alarm_id, alarm_name, severity, previous, current, reason, reason_data) except Exception: LOG.exception(_("Unable to notify alarm %s"), alarm_id) return def process_alarm(notifiers, data): """Notify that alarm has been triggered. :param notifiers: list of possible notifiers :param data: (dict): alarm data """ actions = data.get('actions') if not actions: LOG.error(_("Unable to notify for an alarm with no action")) return for action in actions: _handle_action(notifiers, action, data.get('alarm_id'), data.get('alarm_name'), data.get('severity'), data.get('previous'), data.get('current'), data.get('reason'), data.get('reason_data')) class AlarmEndpoint(object): def __init__(self, notifiers): self.notifiers = notifiers def sample(self, ctxt, publisher_id, event_type, payload, metadata): """Endpoint for alarm notifications""" process_alarm(self.notifiers, payload) aodh-2.0.6/aodh/notifier/test.py0000664000567000056710000000243613076064371017672 0ustar jenkinsjenkins00000000000000# # Copyright 2013-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Test alarm notifier.""" from aodh import notifier class TestAlarmNotifier(notifier.AlarmNotifier): "Test alarm notifier.""" def __init__(self, conf): super(TestAlarmNotifier, self).__init__(conf) self.notifications = [] def notify(self, action, alarm_id, alarm_name, severity, previous, current, reason, reason_data): self.notifications.append((action, alarm_id, alarm_name, severity, previous, current, reason, reason_data)) aodh-2.0.6/aodh/notifier/rest.py0000664000567000056710000001003513076064372017663 0ustar jenkinsjenkins00000000000000# # Copyright 2013-2014 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Rest alarm notifier.""" from oslo_config import cfg from oslo_context import context from oslo_log import log from oslo_serialization import jsonutils import requests import six.moves.urllib.parse as urlparse from aodh.i18n import _ from aodh import notifier LOG = log.getLogger(__name__) OPTS = [ cfg.StrOpt('rest_notifier_certificate_file', default='', deprecated_group="alarm", help='SSL Client certificate for REST notifier.' ), cfg.StrOpt('rest_notifier_certificate_key', default='', deprecated_group="alarm", help='SSL Client private key for REST notifier.' ), cfg.BoolOpt('rest_notifier_ssl_verify', default=True, deprecated_group="alarm", help='Whether to verify the SSL Server certificate when ' 'calling alarm action.' ), cfg.IntOpt('rest_notifier_max_retries', default=0, deprecated_group="alarm", help='Number of retries for REST notifier', ), ] class RestAlarmNotifier(notifier.AlarmNotifier): """Rest alarm notifier.""" def __init__(self, conf): super(RestAlarmNotifier, self).__init__(conf) self.conf = conf def notify(self, action, alarm_id, alarm_name, severity, previous, current, reason, reason_data, headers=None): headers = headers or {} if not headers.get('x-openstack-request-id'): headers['x-openstack-request-id'] = context.generate_request_id() LOG.info(_( "Notifying alarm %(alarm_name)s %(alarm_id)s with severity" " %(severity)s from %(previous)s to %(current)s with action " "%(action)s because %(reason)s. request-id: %(request_id)s ") % ({'alarm_name': alarm_name, 'alarm_id': alarm_id, 'severity': severity, 'previous': previous, 'current': current, 'action': action, 'reason': reason, 'request_id': headers['x-openstack-request-id']})) body = {'alarm_name': alarm_name, 'alarm_id': alarm_id, 'severity': severity, 'previous': previous, 'current': current, 'reason': reason, 'reason_data': reason_data} headers['content-type'] = 'application/json' kwargs = {'data': jsonutils.dumps(body), 'headers': headers} if action.scheme == 'https': default_verify = int(self.conf.rest_notifier_ssl_verify) options = urlparse.parse_qs(action.query) verify = bool(int(options.get('aodh-alarm-ssl-verify', [default_verify])[-1])) kwargs['verify'] = verify cert = self.conf.rest_notifier_certificate_file key = self.conf.rest_notifier_certificate_key if cert: kwargs['cert'] = (cert, key) if key else cert # FIXME(rhonjo): Retries are automatically done by urllib3 in requests # library. However, there's no interval between retries in urllib3 # implementation. It will be better to put some interval between # retries (future work). max_retries = self.conf.rest_notifier_max_retries session = requests.Session() session.mount(action.geturl(), requests.adapters.HTTPAdapter(max_retries=max_retries)) session.post(action.geturl(), **kwargs) aodh-2.0.6/aodh/cmd/0000775000567000056710000000000013076064720015256 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/cmd/api.py0000664000567000056710000000140513076064372016404 0ustar jenkinsjenkins00000000000000# -*- encoding: utf-8 -*- # # Copyright 2014 OpenStack Foundation # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from aodh.api import app from aodh import service def main(): conf = service.prepare_service() app.build_server(conf) aodh-2.0.6/aodh/cmd/data_migration.py0000664000567000056710000001325313076064372020621 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ A tool for migrating alarms and alarms history data from NoSQL to SQL. NOTES: - Users need to specify the source NoSQL connection URL and the destination SQL connection URL with this tool; a usage example: aodh-data-migration --nosql-conn \ mongodb://aodh:password@127.0.0.1:27017/aodh --sql-conn \ mysql+pymysql://root:password@127.0.0.1/aodh?charset=utf8 - Both the alarm data and alarm history data will be migrated when running this tool, but the history data migration can be skipped by setting the --migrate-history option to False. - Make sure the database connections are working before running this tool. The tool can be run repeatedly; duplicated data will be skipped. - This tool depends on the NoSQL and SQL drivers of Aodh, so it should be used only before the removal of the NoSQL drivers. - This tool has been tested in a devstack environment, but users should still be cautious, because data migration between storage backends is inherently risky. 
""" import argparse import logging import sys from oslo_config import cfg from oslo_db import exception from oslo_db import options as db_options from aodh.i18n import _LE, _LI, _LW import six.moves.urllib.parse as urlparse from aodh import storage root_logger = logging.getLogger('') def get_parser(): parser = argparse.ArgumentParser( description='A tool for Migrating alarms and alarms history from' ' NoSQL to SQL', ) parser.add_argument( '--nosql-conn', required=True, type=str, help='The source NoSQL database connection.', ) parser.add_argument( '--sql-conn', required=True, type=str, help='The destination SQL database connection.', ) parser.add_argument( '--migrate-history', default=True, type=bool, help='Migrate history data when migrate alarms or not,' ' True as Default.', ) parser.add_argument( '--debug', default=False, action='store_true', help='Show the debug level log messages.', ) return parser def _validate_conn_options(args): nosql_scheme = urlparse.urlparse(args.nosql_conn).scheme sql_scheme = urlparse.urlparse(args.sql_conn).scheme if nosql_scheme not in ('mongodb', 'hbase'): root_logger.error(_LE('Invalid source DB type %s, the source database ' 'connection should be one of: [mongodb, hbase]' ), nosql_scheme) sys.exit(1) if sql_scheme not in ('mysql', 'mysql+pymysql', 'postgresql', 'sqlite'): root_logger.error(_LE('Invalid destination DB type %s, the destination' ' database connection should be one of: ' '[mysql, postgresql, sqlite]'), sql_scheme) sys.exit(1) def main(): args = get_parser().parse_args() # Set up logging to use the console console = logging.StreamHandler(sys.stderr) formatter = logging.Formatter( '[%(asctime)s] %(levelname)-8s %(message)s') console.setFormatter(formatter) root_logger.addHandler(console) if args.debug: root_logger.setLevel(logging.DEBUG) else: root_logger.setLevel(logging.INFO) _validate_conn_options(args) nosql_conf = cfg.ConfigOpts() db_options.set_defaults(nosql_conf, args.nosql_conn) nosql_conf.register_opts(storage.OPTS, 'database') nosql_conn = storage.get_connection_from_config(nosql_conf) sql_conf = cfg.ConfigOpts() db_options.set_defaults(sql_conf, args.sql_conn) sql_conf.register_opts(storage.OPTS, 'database') sql_conn = storage.get_connection_from_config(sql_conf) root_logger.info( _LI("Starting to migrate alarms data from NoSQL to SQL...")) count = 0 for alarm in nosql_conn.get_alarms(): root_logger.debug("Migrating alarm %s..." % alarm.alarm_id) try: sql_conn.create_alarm(alarm) count += 1 except exception.DBDuplicateEntry: root_logger.warning(_LW("Duplicated alarm %s found, skipped."), alarm.alarm_id) if not args.migrate_history: continue history_count = 0 for history in nosql_conn.get_alarm_changes(alarm.alarm_id, None): history_data = history.as_dict() root_logger.debug(" Migrating alarm history data with" " event_id %s..." 
% history_data['event_id']) try: sql_conn.record_alarm_change(history_data) history_count += 1 except exception.DBDuplicateEntry: root_logger.warning( _LW(" Duplicated alarm history %s found, skipped."), history_data['event_id']) root_logger.info(_LI(" Migrated %(count)s history data of alarm " "%(alarm_id)s"), {'count': history_count, 'alarm_id': alarm.alarm_id}) root_logger.info(_LI("End alarms data migration from NoSQL to SQL, %s" " alarms have been migrated."), count) aodh-2.0.6/aodh/cmd/storage.py0000664000567000056710000000244113076064372017300 0ustar jenkinsjenkins00000000000000# -*- encoding: utf-8 -*- # # Copyright 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from aodh.i18n import _LI from aodh import service from aodh import storage LOG = logging.getLogger(__name__) def dbsync(): conf = service.prepare_service() storage.get_connection_from_config(conf).upgrade() def expirer(): conf = service.prepare_service() if conf.database.alarm_history_time_to_live > 0: LOG.debug("Clearing expired alarm history data") storage_conn = storage.get_connection_from_config(conf) storage_conn.clear_expired_alarm_history_data( conf.database.alarm_history_time_to_live) else: LOG.info(_LI("Nothing to clean, database alarm history time to live " "is disabled")) aodh-2.0.6/aodh/cmd/alarm.py0000664000567000056710000000233113076064372016726 0ustar jenkinsjenkins00000000000000# -*- encoding: utf-8 -*- # # Copyright 2014 OpenStack Foundation # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_service import service as os_service from aodh import evaluator as evaluator_svc from aodh import event as event_svc from aodh import notifier as notifier_svc from aodh import service def notifier(): conf = service.prepare_service() os_service.launch(conf, notifier_svc.AlarmNotifierService(conf)).wait() def evaluator(): conf = service.prepare_service() os_service.launch(conf, evaluator_svc.AlarmEvaluationService(conf)).wait() def listener(): conf = service.prepare_service() os_service.launch(conf, event_svc.EventAlarmEvaluationService(conf)).wait() aodh-2.0.6/aodh/cmd/__init__.py0000664000567000056710000000000013076064371017357 0ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/rpc.py0000664000567000056710000000435413076064372015662 0ustar jenkinsjenkins00000000000000# # Copyright 2013-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_log import log import six from aodh import messaging from aodh.storage import models OPTS = [ cfg.StrOpt('notifier_rpc_topic', default='alarm_notifier', deprecated_group='alarm', help='The topic that aodh uses for alarm notifier ' 'messages.'), ] LOG = log.getLogger(__name__) class RPCAlarmNotifier(object): def __init__(self, conf): transport = messaging.get_transport(conf) self.client = messaging.get_rpc_client( transport, topic=conf.notifier_rpc_topic, version="1.0") def notify(self, alarm, previous, reason, reason_data): actions = getattr(alarm, models.Alarm.ALARM_ACTIONS_MAP[alarm.state]) if not actions: LOG.debug('alarm %(alarm_id)s has no action configured ' 'for state transition from %(previous)s to ' 'state %(state)s, skipping the notification.', {'alarm_id': alarm.alarm_id, 'previous': previous, 'state': alarm.state}) return self.client.cast({}, 'notify_alarm', data={ 'actions': actions, 'alarm_id': alarm.alarm_id, 'alarm_name': alarm.name, 'severity': alarm.severity, 'previous': previous, 'current': alarm.state, 'reason': six.text_type(reason), 'reason_data': reason_data}) aodh-2.0.6/aodh/service.py0000664000567000056710000000517513076064372016540 0ustar jenkinsjenkins00000000000000#!/usr/bin/env python # # Copyright 2013 Red Hat, Inc # Copyright 2012-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import socket from oslo_config import cfg from oslo_db import options as db_options import oslo_i18n from oslo_log import log from oslo_policy import opts as policy_opts from aodh.conf import defaults from aodh import keystone_client from aodh import messaging OPTS = [ cfg.StrOpt('host', default=socket.gethostname(), help='Name of this node, which must be valid in an AMQP ' 'key. Can be an opaque identifier. For ZeroMQ only, must ' 'be a valid host name, FQDN, or IP address.'), cfg.IntOpt('http_timeout', default=600, help='Timeout seconds for HTTP requests. 
Set it to None to ' 'disable timeout.'), cfg.IntOpt('evaluation_interval', default=60, help='Period of evaluation cycle, should' ' be >= than configured pipeline interval for' ' collection of underlying meters.', deprecated_group='alarm', deprecated_opts=[cfg.DeprecatedOpt( 'threshold_evaluation_interval', group='alarm')]), ] def prepare_service(argv=None, config_files=None): conf = cfg.ConfigOpts() oslo_i18n.enable_lazy() log.register_options(conf) log_levels = (conf.default_log_levels + ['stevedore=INFO', 'keystoneclient=INFO']) log.set_defaults(default_log_levels=log_levels) defaults.set_cors_middleware_defaults() db_options.set_defaults(conf) policy_opts.set_defaults(conf) from aodh import opts # Register our own Aodh options for group, options in opts.list_opts(): conf.register_opts(list(options), group=None if group == "DEFAULT" else group) keystone_client.register_keystoneauth_opts(conf) conf(argv, project='aodh', validate_default_values=True, default_config_files=config_files) keystone_client.setup_keystoneauth(conf) log.setup(conf, 'aodh') messaging.setup() return conf aodh-2.0.6/aodh/locale/0000775000567000056710000000000013076064720015752 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/locale/aodh-log-error.pot0000664000567000056710000000303613076064372021324 0ustar jenkinsjenkins00000000000000# Translations template for aodh. # Copyright (C) 2016 ORGANIZATION # This file is distributed under the same license as the aodh project. # FIRST AUTHOR , 2016. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: aodh 2.0.0.0b2.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-01-19 06:30+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.2.0\n" #: aodh/coordination.py:109 aodh/coordination.py:121 msgid "Error connecting to coordination backend." msgstr "" #: aodh/coordination.py:137 msgid "Error sending a heartbeat to coordination backend." msgstr "" #: aodh/coordination.py:207 msgid "Error getting group membership info from coordination backend." msgstr "" #: aodh/evaluator/combination.py:36 #, python-format msgid "alarm %s retrieval failed" msgstr "" #: aodh/evaluator/combination.py:39 #, python-format msgid "alarm %s doesn't exists anymore" msgstr "" #: aodh/evaluator/event.py:78 msgid "Received invalid event (empty or None)" msgstr "" #: aodh/evaluator/event.py:82 #, python-format msgid "Failed to extract event_type from event = %s" msgstr "" #: aodh/evaluator/event.py:87 #, python-format msgid "Failed to extract message_id from event = %s" msgstr "" #: aodh/evaluator/event.py:178 #, python-format msgid "Failed to evaluate alarm (id=%(a)s) triggered by event = %(e)s." msgstr "" aodh-2.0.6/aodh/locale/aodh-log-info.pot0000664000567000056710000000230113076064372021120 0ustar jenkinsjenkins00000000000000# Translations template for aodh. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the aodh project. # FIRST AUTHOR , 2015. 
# #, fuzzy msgid "" msgstr "" "Project-Id-Version: aodh 0.0.1.dev3988\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2015-07-29 06:41+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.0\n" #: aodh/coordination.py:76 msgid "Coordination backend started successfully." msgstr "" #: aodh/coordination.py:126 #, python-format msgid "Joined partitioning group %s" msgstr "" #: aodh/coordination.py:144 #, python-format msgid "Left partitioning group %s" msgstr "" #: aodh/cmd/eventlet/storage.py:43 msgid "Nothing to clean, database alarm history time to live is disabled" msgstr "" #: aodh/storage/impl_log.py:67 #, python-format msgid "Dropping alarm history data with TTL %d" msgstr "" #: aodh/storage/impl_sqlalchemy.py:308 #, python-format msgid "%d alarm histories are removed from database" msgstr "" aodh-2.0.6/aodh/locale/es/0000775000567000056710000000000013076064720016361 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/locale/es/LC_MESSAGES/0000775000567000056710000000000013076064720020146 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/locale/es/LC_MESSAGES/aodh-log-warning.po0000664000567000056710000000216513076064372023652 0ustar jenkinsjenkins00000000000000# Translations template for aodh. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the aodh project. # # Translators: # R. Paula Sánchez , 2015 # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: aodh 2.0.0.0b2.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-01-18 11:09+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-03 11:04+0000\n" "Last-Translator: R. Paula Sánchez \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Spanish\n" #, python-format msgid "Expecting %(expected)d datapoints but only get %(actual)d" msgstr "Se esperan %(expected)d observaciones, pero se obtienen %(actual)d" msgid "" "pecan_debug cannot be enabled, if workers is > 1, the value is overrided " "with False" msgstr "" "no puede habilitarse pecan_debug. Si trabajadores es > 1, el valor se anula " "por Falso" aodh-2.0.6/aodh/locale/es/LC_MESSAGES/aodh-log-info.po0000664000567000056710000000273113076064372023137 0ustar jenkinsjenkins00000000000000# Translations template for aodh. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the aodh project. # # Translators: # R. Paula Sánchez , 2015 # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: aodh 2.0.0.0b2.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-01-18 11:09+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-03 11:00+0000\n" "Last-Translator: R. Paula Sánchez \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Spanish\n" #, python-format msgid "%d alarm histories are removed from database" msgstr "%d historiales de alarma borrados de la base de datos." msgid "Coordination backend started successfully." 
msgstr "Servidor coordinador iniciado con éxito" #, python-format msgid "Dropping alarm history data with TTL %d" msgstr "Eliminando datos del historial de alarmas con TTL de %d" #, python-format msgid "Joined partitioning group %s" msgstr "Se ha unido al grupo de partición %s" #, python-format msgid "Left partitioning group %s" msgstr "Ha dejado el grupo de partición %s" msgid "Nothing to clean, database alarm history time to live is disabled" msgstr "Vida del historial de alarmas de la base de datos: desactivada." aodh-2.0.6/aodh/locale/pt/0000775000567000056710000000000013076064720016375 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/locale/pt/LC_MESSAGES/0000775000567000056710000000000013076064720020162 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/locale/pt/LC_MESSAGES/aodh-log-warning.po0000664000567000056710000000217513076064372023667 0ustar jenkinsjenkins00000000000000# Translations template for aodh. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the aodh project. # # Translators: # AnaFonseca , 2015 # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: aodh 2.0.0.0b2.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-01-18 11:09+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-17 02:28+0000\n" "Last-Translator: AnaFonseca \n" "Language: pt\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Portuguese\n" #, python-format msgid "Expecting %(expected)d datapoints but only get %(actual)d" msgstr "" "Esperando %(expected)d endereços de grupo mas obtendo apenas %(actual)d" msgid "" "pecan_debug cannot be enabled, if workers is > 1, the value is overrided " "with False" msgstr "" "pecan_debug não pode ser ativada, se os trabalhadores são > 1, o valor é " "conseriderado Falso" aodh-2.0.6/aodh/locale/pt/LC_MESSAGES/aodh.po0000664000567000056710000002163413076064372021446 0ustar jenkinsjenkins00000000000000# Translations template for aodh. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the aodh project. # # Translators: # AnaFonseca , 2015 # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" "Project-Id-Version: aodh 2.0.0.0b2.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-01-18 11:09+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-24 07:19+0000\n" "Last-Translator: openstackjenkins \n" "Language: pt\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Portuguese\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "%(entity)s %(id)s Não Encontrado" #, python-format msgid "%(name)s count exceeds maximum value %(maximum)d" msgstr "a contagem %(name)s excede o valor máximo %(maximum)d" #, python-format msgid "%(rule)s must be set for %(type)s type alarm" msgstr "%(rule)s devem ser definidas para o tipo de aviso %(type)s" #, python-format msgid "%(rule1)s and %(rule2)s cannot be set at the same time" msgstr "%(rule1)s e %(rule2)s não podem ser programadas ao mesmo tempo" #, python-format msgid "%d datapoints are unknown" msgstr "endereços de grupo %d desconhecidos" #, python-format msgid "Action %(scheme)s for alarm %(alarm_id)s is unknown, cannot notify" msgstr "" "A ação %(scheme)s para o alarme %(alarm_id)s é desconhecida, impossível " "notificar." #, python-format msgid "Alarm %(alarm_id)s not found in project %(project)s" msgstr "Alarme %(alarm_id)s não encontrado no projeto %(project)s" #, python-format msgid "Alarm %s not found" msgstr "Alarme %s não encontrado" msgid "Alarm combination rule should contain at least two different alarm ids." msgstr "" "A regra de combinação de avisos deve conter pelo menos duas ids de alarme " "diferentes." msgid "Alarm incorrect" msgstr "Alarme incorreto" #, python-format msgid "Alarm quota exceeded for user %(u)s on project %(p)s" msgstr "Aviso de quota excedida para o utilizador %(u)s no projeto %(p)s" #, python-format msgid "" "Alarm when %(meter_name)s is %(comparison_operator)s a %(statistic)s of " "%(threshold)s over %(period)s seconds" msgstr "" "Alarme quando %(meter_name)s é %(comparison_operator)s uma %(statistic)s de " "%(threshold)s em %(period)s segundos" #, python-format msgid "Alarm when %s event occurred." msgstr "Alarme quando evento %s ocorreu." #, python-format msgid "Alarm with name=%s exists" msgstr "O aviso com o nome=%s já existe" #, python-format msgid "Alarm with name='%s' exists" msgstr "Alarme com o nome='%s' já existe" #, python-format msgid "Alarms %(alarm_ids)s are in unknown state" msgstr "Os alarmes %(alarm_ids)s encontram-se em estado desconhecido" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "Não é possível criar a tabela %(table_name)s - já existe. 
Ignorar erro" #, python-format msgid "Cannot specify alarm %s itself in combination rule" msgstr "Não é possivel especificar o próprio alarme %s na combinação de regras" #, python-format msgid "Combined state of alarms %s" msgstr "Estado combinado dos avisos %s" msgid "Configuration:" msgstr "Configuração:" #, python-format msgid "Connecting to %(db)s on %(nodelist)s" msgstr "Ligar a %(db)s em %(nodelist)s" #, python-format msgid "Error parsing HTTP response: %s" msgstr "Erro ao analisar resposta HTTP: %s" #, python-format msgid "Error while posting alarm: %s" msgstr "Erro ao postar o alarme: %s" #, python-format msgid "Error while putting alarm: %s" msgstr "Erro ao pôr o alarme: %s" #, python-format msgid "Failed to evaluate alarm %s" msgstr "Falha ao avaliar avisos %s" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "Erro ao analisar o valor data/hora %s" #, python-format msgid "Filter expression not valid: %s" msgstr "Expressão filtro inválida: %s" msgid "Limit should be positive" msgstr "O limite deve ser positivo" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "Não Autorizado o acesso a %(aspect)s %(id)s" #, python-format msgid "" "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " "%(previous)s to %(current)s with action %(action)s because %(reason)s." msgstr "" "Notificar alarme %(alarm_name)s %(alarm_id)s de %(severity)s prioridade de " "%(previous)s a %(current)s com a ação %(action)s devido a %(reason)s." #, python-format msgid "" "Notifying alarm %(alarm_name)s %(alarm_id)s with severity %(severity)s from " "%(previous)s to %(current)s with action %(action)s because %(reason)s. " "request-id: %(request_id)s " msgstr "" "Notificar alarme %(alarm_name)s %(alarm_id)s de gravidade %(severity)s de " "%(previous)s a %(current)s com a ação %(action)s devido a %(reason)s. " "request-id: %(request_id)s " #, python-format msgid "Order-by expression not valid: %s" msgstr "Expressão ordenar por inválida: %s" #, python-format msgid "" "Remaining as %(state)s due to %(count)d samples %(disposition)s threshold, " "most recent: %(most_recent)s" msgstr "" "Mantendo-se como %(state)s devido a %(count)d amostras %(disposition)s no " "limite, mais recentes: %(most_recent)s" #, python-format msgid "Remaining as %(state)s due to alarms %(alarm_ids)s in state %(state)s" msgstr "" "Mantendo-se em %(state)s devido aos alarmes %(alarm_ids)s em estado %(state)s" #, python-format msgid "Starting server in PID %s" msgstr "Iniciar servidor em PID %s" #, python-format msgid "String %s is not a valid isotime" msgstr "A string %s não é um tempo ISO válido" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "O tipo de dados %(type)s não é suportado. A lista do tipo de dados " "suportados é: %(supported)s" msgid "Time constraint names must be unique for a given alarm." msgstr "" "Os nomes das restrições de tempo deve ser únicos para um determinado aviso." 
#, python-format msgid "Timezone %s is not valid" msgstr "Fuso horário %s inválido" #, python-format msgid "" "Transition to %(state)s due to %(count)d samples %(disposition)s threshold, " "most recent: %(most_recent)s" msgstr "" "Transição para %(state)s devido a %(count)d amostras %(disposition)s no " "limite, mais recentes: %(most_recent)s" #, python-format msgid "Transition to %(state)s due to alarms %(alarm_ids)s in state %(state)s" msgstr "" "Transição para %(state)s devido aos alarmes %(alarm_ids)s em estado %(state)s" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "Não é possível ligar ao servidor da base de dados: %(errmsg)s." #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "Incapaz de converter o valor %(value)s para o tipo de dados esperados " "%(type)s." #, python-format msgid "Unable to notify alarm %s" msgstr "Incampaz de notificar o alarme %s" msgid "Unable to notify for an alarm with no action" msgstr "Incapaz de notificar um alarme sem ação" #, python-format msgid "Unable to parse action %(action)s for alarm %(alarm_id)s" msgstr "Incapaz de analisar a ação %(action)s para o alarme %(alarm_id)s" #, python-format msgid "Unable to parse action %s" msgstr "Incapaz de analisar a ação %s" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "Exceção inesperada ao converter %(value)s para o tipo de dados esperado " "%(type)s." #, python-format msgid "Unsupported action %s" msgstr "Ação não suportada %s" #, python-format msgid "You are not authorized to create action: %s" msgstr "Não tem permissão para criar a ação: %s" #, python-format msgid "alarm %(id)s transitioning to %(state)s because %(reason)s" msgstr "alarme %(id)s a passar a %(state)s devido a %(reason)s" msgid "alarm evaluation cycle failed" msgstr "o ciclo de avaliação do alarme falhou" msgid "alarm state update failed" msgstr "a atualização do estado do alarme falhou" msgid "alarm stats retrieval failed" msgstr "a extração da estatística do alarme falhou" #, python-format msgid "duplicate actions are found: %s, remove duplicate ones" msgstr "encontradas ações duplicadas: %s, remover as duplicadas" #, python-format msgid "initiating evaluation cycle on %d alarms" msgstr "iniciando ciclo de avaliação em alarme %d" #, python-format msgid "serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s" msgstr "ativo em 0.0.0.0:%(sport)s, ver em http://127.0.0.1:%(vport)s" #, python-format msgid "serving on http://%(host)s:%(port)s" msgstr "ativo em http://%(host)s:%(port)s" msgid "state invalid" msgstr "estato inválido" msgid "state_timestamp should be datetime object" msgstr "state_timestamp deve ser um objeto data/hora" msgid "timestamp should be datetime object" msgstr "o timestamp deve ser um objeto data/hora" aodh-2.0.6/aodh/locale/pt/LC_MESSAGES/aodh-log-info.po0000664000567000056710000000273613076064372023160 0ustar jenkinsjenkins00000000000000# Translations template for aodh. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the aodh project. # # Translators: # AnaFonseca , 2015 # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" "Project-Id-Version: aodh 2.0.0.0b2.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-01-18 11:09+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-17 02:23+0000\n" "Last-Translator: AnaFonseca \n" "Language: pt\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Portuguese\n" #, python-format msgid "%d alarm histories are removed from database" msgstr "%d históricos de alarme foram removidas da base de dados" msgid "Coordination backend started successfully." msgstr "Coordenação de backend iniciada com sucesso." #, python-format msgid "Dropping alarm history data with TTL %d" msgstr "Cessar histórico do alarme com TTL %d" #, python-format msgid "Joined partitioning group %s" msgstr "Entrou na partição do grupo %s" #, python-format msgid "Left partitioning group %s" msgstr "Saiu da partição do grupo %s" msgid "Nothing to clean, database alarm history time to live is disabled" msgstr "" "Nada a limpar, o histórico do alarme TTL, time to live, da base de dados " "está desativado." aodh-2.0.6/aodh/locale/aodh.pot0000664000567000056710000001764313076064372017427 0ustar jenkinsjenkins00000000000000# Translations template for aodh. # Copyright (C) 2016 ORGANIZATION # This file is distributed under the same license as the aodh project. # FIRST AUTHOR , 2016. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: aodh 2.0.0.0b2.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-01-19 06:30+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.2.0\n" #: aodh/api/app.py:93 #, python-format msgid "Starting server in PID %s" msgstr "" #: aodh/api/app.py:94 msgid "Configuration:" msgstr "" #: aodh/api/app.py:98 #, python-format msgid "serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s" msgstr "" #: aodh/api/app.py:102 #, python-format msgid "serving on http://%(host)s:%(port)s" msgstr "" #: aodh/api/middleware.py:102 #, python-format msgid "Error parsing HTTP response: %s" msgstr "" #: aodh/api/controllers/v2/alarms.py:84 #, python-format msgid "Alarm quota exceeded for user %(u)s on project %(p)s" msgstr "" #: aodh/api/controllers/v2/alarms.py:166 #, python-format msgid "Timezone %s is not valid" msgstr "" #: aodh/api/controllers/v2/alarms.py:284 msgid "Time constraint names must be unique for a given alarm." 
msgstr "" #: aodh/api/controllers/v2/alarms.py:294 #, python-format msgid "%(rule)s must be set for %(type)s type alarm" msgstr "" #: aodh/api/controllers/v2/alarms.py:305 #, python-format msgid "%(rule1)s and %(rule2)s cannot be set at the same time" msgstr "" #: aodh/api/controllers/v2/alarms.py:320 #, python-format msgid "duplicate actions are found: %s, remove duplicate ones" msgstr "" #: aodh/api/controllers/v2/alarms.py:326 #, python-format msgid "%(name)s count exceeds maximum value %(maximum)d" msgstr "" #: aodh/api/controllers/v2/alarms.py:338 #, python-format msgid "Unable to parse action %s" msgstr "" #: aodh/api/controllers/v2/alarms.py:341 #, python-format msgid "Unsupported action %s" msgstr "" #: aodh/api/controllers/v2/alarms.py:344 #, python-format msgid "You are not authorized to create action: %s" msgstr "" #: aodh/api/controllers/v2/alarms.py:600 #, python-format msgid "Alarm with name=%s exists" msgstr "" #: aodh/api/controllers/v2/alarms.py:612 #, python-format msgid "Error while putting alarm: %s" msgstr "" #: aodh/api/controllers/v2/alarms.py:613 aodh/api/controllers/v2/alarms.py:779 msgid "Alarm incorrect" msgstr "" #: aodh/api/controllers/v2/alarms.py:671 msgid "state invalid" msgstr "" #: aodh/api/controllers/v2/alarms.py:772 #, python-format msgid "Alarm with name='%s' exists" msgstr "" #: aodh/api/controllers/v2/alarms.py:778 #, python-format msgid "Error while posting alarm: %s" msgstr "" #: aodh/api/controllers/v2/base.py:49 #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "" #: aodh/api/controllers/v2/base.py:58 #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "" #: aodh/api/controllers/v2/base.py:201 #, python-format msgid "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" #: aodh/api/controllers/v2/base.py:206 #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is:" " %(supported)s" msgstr "" #: aodh/api/controllers/v2/base.py:211 #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type " "%(type)s." msgstr "" #: aodh/api/controllers/v2/base.py:221 #, python-format msgid "Alarm %s not found" msgstr "" #: aodh/api/controllers/v2/base.py:223 #, python-format msgid "Alarm %(alarm_id)s not found in project %(project)s" msgstr "" #: aodh/api/controllers/v2/query.py:201 #, python-format msgid "Filter expression not valid: %s" msgstr "" #: aodh/api/controllers/v2/query.py:216 #, python-format msgid "Order-by expression not valid: %s" msgstr "" #: aodh/api/controllers/v2/query.py:226 msgid "Limit should be positive" msgstr "" #: aodh/api/controllers/v2/query.py:333 #, python-format msgid "String %s is not a valid isotime" msgstr "" #: aodh/api/controllers/v2/query.py:334 #, python-format msgid "Failed to parse the timestamp value %s" msgstr "" #: aodh/api/controllers/v2/alarm_rules/combination.py:39 #, python-format msgid "Combined state of alarms %s" msgstr "" #: aodh/api/controllers/v2/alarm_rules/combination.py:48 msgid "Alarm combination rule should contain at least two different alarm ids." msgstr "" #: aodh/api/controllers/v2/alarm_rules/combination.py:69 #, python-format msgid "Cannot specify alarm %s itself in combination rule" msgstr "" #: aodh/api/controllers/v2/alarm_rules/event.py:48 #, python-format msgid "Alarm when %s event occurred." 
msgstr "" #: aodh/api/controllers/v2/alarm_rules/threshold.py:93 #, python-format msgid "" "Alarm when %(meter_name)s is %(comparison_operator)s a %(statistic)s of " "%(threshold)s over %(period)s seconds" msgstr "" #: aodh/evaluator/__init__.py:118 #, python-format msgid "alarm %(id)s transitioning to %(state)s because %(reason)s" msgstr "" #: aodh/evaluator/__init__.py:129 msgid "alarm state update failed" msgstr "" #: aodh/evaluator/__init__.py:208 #, python-format msgid "initiating evaluation cycle on %d alarms" msgstr "" #: aodh/evaluator/__init__.py:213 msgid "alarm evaluation cycle failed" msgstr "" #: aodh/evaluator/__init__.py:225 #, python-format msgid "Failed to evaluate alarm %s" msgstr "" #: aodh/evaluator/combination.py:60 #, python-format msgid "Alarms %(alarm_ids)s are in unknown state" msgstr "" #: aodh/evaluator/combination.py:82 #, python-format msgid "Transition to %(state)s due to alarms %(alarm_ids)s in state %(state)s" msgstr "" #: aodh/evaluator/combination.py:86 #, python-format msgid "Remaining as %(state)s due to alarms %(alarm_ids)s in state %(state)s" msgstr "" #: aodh/evaluator/event.py:250 #, python-format msgid "Event (message_id=%(message)s) hit the query of alarm (id=%(alarm)s)" msgstr "" #: aodh/evaluator/gnocchi.py:77 aodh/evaluator/threshold.py:120 msgid "alarm stats retrieval failed" msgstr "" #: aodh/evaluator/threshold.py:137 #, python-format msgid "%d datapoints are unknown" msgstr "" #: aodh/evaluator/threshold.py:161 #, python-format msgid "" "Transition to %(state)s due to %(count)d samples %(disposition)s " "threshold, most recent: %(most_recent)s" msgstr "" #: aodh/evaluator/threshold.py:165 #, python-format msgid "" "Remaining as %(state)s due to %(count)d samples %(disposition)s " "threshold, most recent: %(most_recent)s" msgstr "" #: aodh/notifier/__init__.py:127 #, python-format msgid "Unable to parse action %(action)s for alarm %(alarm_id)s" msgstr "" #: aodh/notifier/__init__.py:136 #, python-format msgid "Action %(scheme)s for alarm %(alarm_id)s is unknown, cannot notify" msgstr "" #: aodh/notifier/__init__.py:147 #, python-format msgid "Unable to notify alarm %s" msgstr "" #: aodh/notifier/__init__.py:160 msgid "Unable to notify for an alarm with no action" msgstr "" #: aodh/notifier/log.py:31 #, python-format msgid "" "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from" " %(previous)s to %(current)s with action %(action)s because %(reason)s." msgstr "" #: aodh/notifier/rest.py:68 #, python-format msgid "" "Notifying alarm %(alarm_name)s %(alarm_id)s with severity %(severity)s " "from %(previous)s to %(current)s with action %(action)s because " "%(reason)s. request-id: %(request_id)s " msgstr "" #: aodh/storage/models.py:70 msgid "timestamp should be datetime object" msgstr "" #: aodh/storage/models.py:72 msgid "state_timestamp should be datetime object" msgstr "" #: aodh/storage/hbase/utils.py:230 #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "" #: aodh/storage/mongo/utils.py:77 #, python-format msgid "Connecting to %(db)s on %(nodelist)s" msgstr "" #: aodh/storage/mongo/utils.py:85 #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "" aodh-2.0.6/aodh/locale/aodh-log-warning.pot0000664000567000056710000000153113076064372021636 0ustar jenkinsjenkins00000000000000# Translations template for aodh. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the aodh project. # FIRST AUTHOR , 2015. 
# #, fuzzy msgid "" msgstr "" "Project-Id-Version: aodh 0.0.1.dev3988\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2015-07-29 06:41+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.0\n" #: aodh/api/app.py:79 msgid "" "pecan_debug cannot be enabled, if workers is > 1, the value is overrided " "with False" msgstr "" #: aodh/evaluator/threshold.py:141 #, python-format msgid "Expecting %(expected)d datapoints but only get %(actual)d" msgstr "" aodh-2.0.6/aodh/locale/ru/0000775000567000056710000000000013076064720016400 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/locale/ru/LC_MESSAGES/0000775000567000056710000000000013076064720020165 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/locale/ru/LC_MESSAGES/aodh-log-warning.po0000664000567000056710000000246413076064372023673 0ustar jenkinsjenkins00000000000000# Translations template for aodh. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the aodh project. # # Translators: # Altinbek , 2015 # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: aodh 2.0.0.0b2.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-01-18 11:09+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-07-10 07:14+0000\n" "Last-Translator: Altinbek \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" "%100>=11 && n%100<=14)? 2 : 3);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Russian\n" #, python-format msgid "Expecting %(expected)d datapoints but only get %(actual)d" msgstr "Ожидайте %(expected)d точки данных, только получить %(actual)d код" msgid "" "pecan_debug cannot be enabled, if workers is > 1, the value is overrided " "with False" msgstr "" "pecan_debug не может быть включен, если включено значение > 1, перекрытая " "False" aodh-2.0.6/aodh/locale/ru/LC_MESSAGES/aodh.po0000664000567000056710000002605713076064372021455 0ustar jenkinsjenkins00000000000000# Translations template for aodh. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the aodh project. # # Translators: # Altinbek , 2015 # OpenStack Infra , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: aodh 2.0.0.0b2.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-01-18 11:09+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-08-24 07:19+0000\n" "Last-Translator: openstackjenkins \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" "%100>=11 && n%100<=14)? 
2 : 3);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Russian\n" #, python-format msgid "%(entity)s %(id)s Not Found" msgstr "%(entity)s %(id)s не найдены" #, python-format msgid "%(name)s count exceeds maximum value %(maximum)d" msgstr "контент %(name)s превышает количество символов в %(maximum)d" #, python-format msgid "%(rule)s must be set for %(type)s type alarm" msgstr "%(rule)s должны быть установлены для %(type)s сигналов тревоги" #, python-format msgid "%(rule1)s and %(rule2)s cannot be set at the same time" msgstr "%(rule1)s и %(rule2)s не могут работать одновременно" #, python-format msgid "%d datapoints are unknown" msgstr "%d исходный код данных не известен" #, python-format msgid "Action %(scheme)s for alarm %(alarm_id)s is unknown, cannot notify" msgstr "" "Без определения типа сигнала уведомленияn %(scheme)s не возможно " "действовать %(alarm_id)s " #, python-format msgid "Alarm %(alarm_id)s not found in project %(project)s" msgstr "Сигнал %(alarm_id)s не найдены в проекте %(project)s" #, python-format msgid "Alarm %s not found" msgstr "Сигнал %s не найден" msgid "Alarm combination rule should contain at least two different alarm ids." msgstr "" "сигнал тревоги должно содержать по крайней мере два различных " "идентификаторов сигнализации." msgid "Alarm incorrect" msgstr "Сигнализация неисправна" #, python-format msgid "Alarm quota exceeded for user %(u)s on project %(p)s" msgstr "количество ошибок пользователем %(u)s превысила норму %(p)s" #, python-format msgid "" "Alarm when %(meter_name)s is %(comparison_operator)s a %(statistic)s of " "%(threshold)s over %(period)s seconds" msgstr "" "При срабатываемости сигналатревоги %(meter_name)s как " "%(comparison_operator)s a %(statistic)s в %(threshold)s срабатывает за " "%(period)s секунду" #, python-format msgid "Alarm with name=%s exists" msgstr "Сигнализация с именем=%s существует" #, python-format msgid "Alarm with name='%s' exists" msgstr "Сигнализация с названием='%s' существует" #, python-format msgid "Alarms %(alarm_ids)s are in unknown state" msgstr "Сигнализация %(alarm_ids)s в не корректном состоянии" #, python-format msgid "Cannot create table %(table_name)s it already exists. Ignoring error" msgstr "" "Не удается создать таблицу %(table_name)s, она уже существует. 
Игнорирование " "ошибки" #, python-format msgid "Cannot specify alarm %s itself in combination rule" msgstr "Не указывать сигнал тревоги %s в комбинации правило" #, python-format msgid "Combined state of alarms %s" msgstr "смешанное состояние тревоги %s" msgid "Configuration:" msgstr "Конфигурация: " #, python-format msgid "Connecting to %(db)s on %(nodelist)s" msgstr "Подключение к %(db)s на %(nodelist)s" #, python-format msgid "Error parsing HTTP response: %s" msgstr "Ошибка ввода HTTP кода: %s" #, python-format msgid "Error while posting alarm: %s" msgstr "Ошибка при отправке сигнала тревоги: %s" #, python-format msgid "Error while putting alarm: %s" msgstr "Ошибка во время установки тревоги: %s" #, python-format msgid "Failed to evaluate alarm %s" msgstr "не удалось определить тип тревоги %s" #, python-format msgid "Failed to parse the timestamp value %s" msgstr "Не удалось разобрать значение временной метки %s" #, python-format msgid "Filter expression not valid: %s" msgstr "Фильтр ввода не действует: %s" msgid "Limit should be positive" msgstr "Лимит должен быть точным" #, python-format msgid "Not Authorized to access %(aspect)s %(id)s" msgstr "Нет доступа к %(aspect)s %(id)s" #, python-format msgid "" "Notifying alarm %(alarm_name)s %(alarm_id)s of %(severity)s priority from " "%(previous)s to %(current)s with action %(action)s because %(reason)s." msgstr "" "Сигнал тревоги %(alarm_name)s %(alarm_id)s не работает потому что " "%(reason)s в %(severity)s приоритетом на %(previous)s %(current)s " "влияние на действие %(action)s" #, python-format msgid "" "Notifying alarm %(alarm_name)s %(alarm_id)s with severity %(severity)s from " "%(previous)s to %(current)s with action %(action)s because %(reason)s. " "request-id: %(request_id)s " msgstr "" "сигналом тревоги %(alarm_name)s %(alarm_id)s не работает %(severity)s " "потому что %(previous)s причина %(current)s в действиях %(action)s " "%(reason)s Запрос-ID: %(request_id)s" #, python-format msgid "Order-by expression not valid: %s" msgstr "вызов значения не активна: %s" #, python-format msgid "" "Remaining as %(state)s due to %(count)d samples %(disposition)s threshold, " "most recent: %(most_recent)s" msgstr "" "в последнее время %(most_recent)s оставаясь %(state)s у порога из-за " "%(count)d испытательных сигналов %(disposition)s" #, python-format msgid "Remaining as %(state)s due to alarms %(alarm_ids)s in state %(state)s" msgstr "" "Оставаясь %(state)s в состоянии уведомления %(state)s из-за тревоги " "%(alarm_ids)s" #, python-format msgid "Starting server in PID %s" msgstr "сервер начинается с PID %s" #, python-format msgid "String %s is not a valid isotime" msgstr "Строка %s временно не активна" #, python-format msgid "" "The data type %(type)s is not supported. The supported data type list is: " "%(supported)s" msgstr "" "Тип данных %(type)s не поддерживается. Список поддерживаемых типов данных: " "%(supported)s" msgid "Time constraint names must be unique for a given alarm." 
msgstr "Название временного контента должна отличаться для сигнала превоги" #, python-format msgid "Timezone %s is not valid" msgstr "таймер %s не актевирован" #, python-format msgid "" "Transition to %(state)s due to %(count)d samples %(disposition)s threshold, " "most recent: %(most_recent)s" msgstr "" "последнее время %(most_recent)s, переход в состояния оповещения %(state)s " "из-за %(count)d попыток испытания%(disposition)s у порога" #, python-format msgid "Transition to %(state)s due to alarms %(alarm_ids)s in state %(state)s" msgstr "" "Переход %(state)s из состояния тревоги %(alarm_ids)s в положение %(state)s" #, python-format msgid "Unable to connect to the database server: %(errmsg)s." msgstr "Не удается подключиться к серверу базы данных: %(errmsg)s. " #, python-format msgid "" "Unable to convert the value %(value)s to the expected data type %(type)s." msgstr "" "Невозможно преобразовать значение %(value)s с ожидаемым типом данных " "%(type)s." #, python-format msgid "Unable to notify alarm %s" msgstr "Невозможно оповещать о тревоге %s" msgid "Unable to notify for an alarm with no action" msgstr "Невозможно оповещать о тревоге, без действия на него" #, python-format msgid "Unable to parse action %(action)s for alarm %(alarm_id)s" msgstr "" "Невозможно разобраться в действиях %(action)s в сигнализации %(alarm_id)s" #, python-format msgid "Unable to parse action %s" msgstr "Невозможно разобрать действий %s" #, python-format msgid "" "Unexpected exception converting %(value)s to the expected data type %(type)s." msgstr "" "мгновенное преобразования значения %(value)s с ожидаемым типом данных " "%(type)s." #, python-format msgid "Unsupported action %s" msgstr "не поддерживается действие %s" #, python-format msgid "You are not authorized to create action: %s" msgstr "Вы не авторизованы, чтобы деиствовать: %s" #, python-format msgid "alarm %(id)s transitioning to %(state)s because %(reason)s" msgstr "" "Сигнализация %(id)s приводится в действие %(state)s потому что %(reason)s" msgid "alarm evaluation cycle failed" msgstr "Оценка цикла тревог не удался" msgid "alarm state update failed" msgstr "сигнал тревоги определить не удалось " msgid "alarm stats retrieval failed" msgstr "Статистика сигнал оповещения не получен" #, python-format msgid "duplicate actions are found: %s, remove duplicate ones" msgstr "дублирующие действия найдены в: %s, удалить дубликаты из них" #, python-format msgid "initiating evaluation cycle on %d alarms" msgstr "приступим к оценке цикла %d alarms" #, python-format msgid "serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s" msgstr "услуга на 0.0.0.0:%(sport)s, указывает на http://127.0.0.1:%(vport)s" #, python-format msgid "serving on http://%(host)s:%(port)s" msgstr "указатели на http://%(host)s:%(port)s" msgid "state invalid" msgstr "Неправильное состояние" msgid "state_timestamp should be datetime object" msgstr "В state_timestamp должен быть указан дата объекта" msgid "timestamp should be datetime object" msgstr "должна быть указана дата вывода объекта" aodh-2.0.6/aodh/locale/ru/LC_MESSAGES/aodh-log-info.po0000664000567000056710000000333413076064372023156 0ustar jenkinsjenkins00000000000000# Translations template for aodh. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the aodh project. # # Translators: # Altinbek , 2015 # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" "Project-Id-Version: aodh 2.0.0.0b2.dev28\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-01-18 11:09+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-07-10 07:09+0000\n" "Last-Translator: Altinbek \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" "%100>=11 && n%100<=14)? 2 : 3);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Russian\n" #, python-format msgid "%d alarm histories are removed from database" msgstr "%d истории сигнализации удаляются из базы данных" msgid "Coordination backend started successfully." msgstr "Координация баз успешно запущен." #, python-format msgid "Dropping alarm history data with TTL %d" msgstr "Удаление истории данных сигнализации с TTL %d" #, python-format msgid "Joined partitioning group %s" msgstr "регистрация закрытой группы %s" #, python-format msgid "Left partitioning group %s" msgstr "Левай подгруппа %s" msgid "Nothing to clean, database alarm history time to live is disabled" msgstr "Ничто не удалять, базы данных истории об уведомлении отключена" aodh-2.0.6/aodh/opts.py0000664000567000056710000000543413076064372016063 0ustar jenkinsjenkins00000000000000# Copyright 2014-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import itertools from keystoneauth1 import loading from oslo_config import cfg import aodh.api import aodh.api.controllers.v2.alarms import aodh.coordination import aodh.evaluator import aodh.evaluator.event import aodh.evaluator.gnocchi import aodh.event import aodh.keystone_client import aodh.notifier.rest import aodh.rpc import aodh.service import aodh.storage def list_opts(): return [ ('DEFAULT', itertools.chain( aodh.evaluator.OPTS, aodh.evaluator.event.OPTS, aodh.evaluator.gnocchi.OPTS, aodh.event.OPTS, aodh.notifier.OPTS, aodh.notifier.rest.OPTS, aodh.queue.OPTS, aodh.rpc.OPTS, aodh.service.OPTS, aodh.api.controllers.v2.alarms.ALARM_API_OPTS)), ('api', itertools.chain( aodh.api.OPTS, [ cfg.StrOpt( 'paste_config', deprecated_name='api_paste_config', deprecated_group='DEFAULT', default="api_paste.ini", help="Configuration file for WSGI definition of API."), cfg.IntOpt( 'workers', default=1, deprecated_name='api_workers', deprecated_group='DEFAULT', min=1, help='Number of workers for aodh API server.'), cfg.BoolOpt('pecan_debug', default=False, help='Toggle Pecan Debug Middleware.'), ])), ('coordination', aodh.coordination.OPTS), ('database', aodh.storage.OPTS), ('service_credentials', aodh.keystone_client.OPTS), ] def list_keystoneauth_opts(): # NOTE(sileht): the configuration file contains only the options # for the password plugin that handles keystone v2 and v3 API # with discovery. But other options are possible. 
# Also, the default loaded plugin is password-aodh-legacy for # backward compatibily return [('service_credentials', ( loading.get_auth_common_conf_options() + loading.get_auth_plugin_conf_options('password')))] aodh-2.0.6/aodh/hacking/0000775000567000056710000000000013076064720016117 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/hacking/checks.py0000664000567000056710000000452213076064372017737 0ustar jenkinsjenkins00000000000000# Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Guidelines for writing new hacking checks - Use only for aodh specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range C3xx. Find the current test with the highest allocated number and then pick the next value. - Keep the test method code in the source file ordered based on the C3xx value. - List the new rule in the top level HACKING.rst file - Add test cases for each new rule to aodh/tests/test_hacking.py """ import re # TODO(zqfan): When other oslo libraries switch over non-namespace'd # imports, we need to add them to the regexp below. oslo_namespace_imports = re.compile( r"(from|import) oslo[.](config|utils|i18n|serialization)") def check_oslo_namespace_imports(logical_line, physical_line, filename): # ignore openstack.common since they are not maintained by us if 'aodh/openstack/common/' in filename: return if re.match(oslo_namespace_imports, logical_line): msg = ("C300: '%s' must be used instead of '%s'." % ( logical_line.replace('oslo.', 'oslo_'), logical_line)) yield(0, msg) def no_translate_debug_logs(logical_line, filename): """Check for 'LOG.debug(_(' As per our translation policy, https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation we shouldn't translate debug level logs. * This check assumes that 'LOG' is a logger. * Use filename so we can start enforcing this in specific folders instead of needing to do so all at once. N319 """ if logical_line.startswith("LOG.debug(_("): yield(0, "N319 Don't translate debug level logs") def factory(register): register(check_oslo_namespace_imports) register(no_translate_debug_logs) aodh-2.0.6/aodh/hacking/__init__.py0000664000567000056710000000000013076064372020221 0ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/conf/0000775000567000056710000000000013076064720015440 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/conf/defaults.py0000664000567000056710000000274213076064372017631 0ustar jenkinsjenkins00000000000000# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_middleware import cors def set_cors_middleware_defaults(): """Update default configuration options for oslo.middleware.""" # CORS Defaults # TODO(krotscheck): Update with https://review.openstack.org/#/c/285368/ cfg.set_defaults(cors.CORS_OPTS, allow_headers=['X-Auth-Token', 'X-Openstack-Request-Id', 'X-Subject-Token'], expose_headers=['X-Auth-Token', 'X-Openstack-Request-Id', 'X-Subject-Token'], allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'] ) aodh-2.0.6/aodh/conf/__init__.py0000664000567000056710000000000013076064371017541 0ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/event.py0000664000567000056710000000402213076064372016207 0ustar jenkinsjenkins00000000000000# # Copyright 2015 NEC Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg import oslo_messaging from oslo_service import service from aodh.evaluator import event from aodh import messaging from aodh import storage OPTS = [ cfg.StrOpt('event_alarm_topic', default='alarm.all', help='The topic that aodh uses for event alarm evaluation.'), ] class EventAlarmEndpoint(object): def __init__(self, evaluator): self.evaluator = evaluator def sample(self, ctxt, publisher_id, event_type, payload, metadata): # TODO(r-mibu): requeue on error self.evaluator.evaluate_events(payload) class EventAlarmEvaluationService(service.Service): def __init__(self, conf): super(EventAlarmEvaluationService, self).__init__() self.conf = conf self.storage_conn = storage.get_connection_from_config(self.conf) self.evaluator = event.EventAlarmEvaluator(self.conf) def start(self): super(EventAlarmEvaluationService, self).start() self.listener = messaging.get_notification_listener( messaging.get_transport(self.conf), [oslo_messaging.Target(topic=self.conf.event_alarm_topic)], [EventAlarmEndpoint(self.evaluator)]) self.listener.start() # Add a dummy thread to have wait() working self.tg.add_timer(604800, lambda: None) def stop(self): self.listener.stop() self.listener.wait() super(EventAlarmEvaluationService, self).stop() aodh-2.0.6/aodh/i18n.py0000664000567000056710000000252013076064372015646 0ustar jenkinsjenkins00000000000000# Copyright 2014 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """oslo.i18n integration module. 
See http://docs.openstack.org/developer/oslo.i18n/usage.html """ import oslo_i18n DOMAIN = 'aodh' _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary # Translators for log levels. # # The abbreviated names are meant to reflect the usual use of a short # name like '_'. The "L" is for "log" and the other letter comes from # the level. _LI = _translators.log_info _LW = _translators.log_warning _LE = _translators.log_error _LC = _translators.log_critical def translate(value, user_locale): return oslo_i18n.translate(value, user_locale) def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) aodh-2.0.6/aodh/coordination.py0000664000567000056710000002323213076064372017562 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import bisect import hashlib import struct import uuid from oslo_config import cfg from oslo_log import log import retrying import six import tooz.coordination from aodh.i18n import _LE, _LI, _LW LOG = log.getLogger(__name__) OPTS = [ cfg.StrOpt('backend_url', help='The backend URL to use for distributed coordination. If ' 'left empty, per-deployment central agent and per-host ' 'compute agent won\'t do workload ' 'partitioning and will only function correctly if a ' 'single instance of that service is running.'), cfg.FloatOpt('heartbeat', default=1.0, help='Number of seconds between heartbeats for distributed ' 'coordination.'), cfg.FloatOpt('check_watchers', default=10.0, help='Number of seconds between checks to see if group ' 'membership has changed'), cfg.IntOpt('retry_backoff', default=1, help='Retry backoff factor when retrying to connect with' ' coordination backend'), cfg.IntOpt('max_retry_interval', default=30, help='Maximum number of seconds between retry to join ' 'partitioning group') ] class ErrorJoiningPartitioningGroup(Exception): def __init__(self): super(ErrorJoiningPartitioningGroup, self).__init__(_LE( 'Error occurred when joining partitioning group')) class MemberNotInGroupError(Exception): def __init__(self, group_id, members, my_id): super(MemberNotInGroupError, self).__init__(_LE( 'Group ID: %{group_id}s, Members: %{members}s, Me: %{me}s: ' 'Current agent is not part of group and cannot take tasks') % {'group_id': group_id, 'members': members, 'me': my_id}) def retry_on_error_joining_partition(exception): return isinstance(exception, ErrorJoiningPartitioningGroup) def retry_on_member_not_in_group(exception): return isinstance(exception, MemberNotInGroupError) class HashRing(object): def __init__(self, nodes, replicas=100): self._ring = dict() self._sorted_keys = [] for node in nodes: for r in six.moves.range(replicas): hashed_key = self._hash('%s-%s' % (node, r)) self._ring[hashed_key] = node self._sorted_keys.append(hashed_key) self._sorted_keys.sort() @staticmethod def _hash(key): return struct.unpack_from('>I', hashlib.md5(str(key).encode()).digest())[0] def _get_position_on_ring(self, 
key): hashed_key = self._hash(key) position = bisect.bisect(self._sorted_keys, hashed_key) return position if position < len(self._sorted_keys) else 0 def get_node(self, key): if not self._ring: return None pos = self._get_position_on_ring(key) return self._ring[self._sorted_keys[pos]] class PartitionCoordinator(object): """Workload partitioning coordinator. This class uses the `tooz` library to manage group membership. To ensure that the other agents know this agent is still alive, the `heartbeat` method should be called periodically. Coordination errors and reconnects are handled under the hood, so the service using the partition coordinator need not care whether the coordination backend is down. The `extract_my_subset` will simply return an empty iterable in this case. """ def __init__(self, conf, my_id=None): self.conf = conf self.backend_url = self.conf.coordination.backend_url self._coordinator = None self._groups = set() self._my_id = my_id or str(uuid.uuid4()) def start(self): if self.backend_url: try: self._coordinator = tooz.coordination.get_coordinator( self.backend_url, self._my_id) self._coordinator.start() LOG.info(_LI('Coordination backend started successfully.')) except tooz.coordination.ToozError: LOG.exception(_LE('Error connecting to coordination backend.')) def stop(self): if not self._coordinator: return for group in list(self._groups): self.leave_group(group) try: self._coordinator.stop() except tooz.coordination.ToozError: LOG.exception(_LE('Error connecting to coordination backend.')) finally: self._coordinator = None def is_active(self): return self._coordinator is not None def heartbeat(self): if self._coordinator: if not self._coordinator.is_started: # re-connect self.start() try: self._coordinator.heartbeat() except tooz.coordination.ToozError: LOG.exception(_LE('Error sending a heartbeat to coordination ' 'backend.')) def watch_group(self, namespace, callback): if self._coordinator: self._coordinator.watch_join_group(namespace, callback) self._coordinator.watch_leave_group(namespace, callback) def run_watchers(self): if self._coordinator: self._coordinator.run_watchers() def join_group(self, group_id): if (not self._coordinator or not self._coordinator.is_started or not group_id): return retry_backoff = self.conf.coordination.retry_backoff * 1000 max_retry_interval = self.conf.coordination.max_retry_interval * 1000 @retrying.retry( wait_exponential_multiplier=retry_backoff, wait_exponential_max=max_retry_interval, retry_on_exception=retry_on_error_joining_partition, wrap_exception=True) def _inner(): try: join_req = self._coordinator.join_group(group_id) join_req.get() LOG.info(_LI('Joined partitioning group %s'), group_id) except tooz.coordination.MemberAlreadyExist: return except tooz.coordination.GroupNotCreated: create_grp_req = self._coordinator.create_group(group_id) try: create_grp_req.get() except tooz.coordination.GroupAlreadyExist: pass raise ErrorJoiningPartitioningGroup() except tooz.coordination.ToozError: LOG.exception(_LE('Error joining partitioning group %s,' ' re-trying'), group_id) raise ErrorJoiningPartitioningGroup() self._groups.add(group_id) return _inner() def leave_group(self, group_id): if group_id not in self._groups: return if self._coordinator: self._coordinator.leave_group(group_id) self._groups.remove(group_id) LOG.info(_LI('Left partitioning group %s'), group_id) def _get_members(self, group_id): if not self._coordinator: return [self._my_id] while True: get_members_req = self._coordinator.get_members(group_id) try: return 
get_members_req.get() except tooz.coordination.GroupNotCreated: self.join_group(group_id) @retrying.retry(stop_max_attempt_number=5, wait_random_max=2000, retry_on_exception=retry_on_member_not_in_group) def extract_my_subset(self, group_id, universal_set): """Filters an iterable, returning only objects assigned to this agent. We have a list of objects and get a list of active group members from `tooz`. We then hash all the objects into buckets and return only the ones that hashed into *our* bucket. """ if not group_id: return universal_set if group_id not in self._groups: self.join_group(group_id) try: members = self._get_members(group_id) LOG.debug('Members of group: %s, Me: %s', members, self._my_id) if self._my_id not in members: LOG.warning(_LW('Cannot extract tasks because agent failed to ' 'join group properly. Rejoining group.')) self.join_group(group_id) members = self._get_members(group_id) if self._my_id not in members: raise MemberNotInGroupError(group_id, members, self._my_id) LOG.debug('Members of group: %s, Me: %s', members, self._my_id) hr = HashRing(members) LOG.debug('Universal set: %s', universal_set) my_subset = [v for v in universal_set if hr.get_node(str(v)) == self._my_id] LOG.debug('My subset: %s', my_subset) return my_subset except tooz.coordination.ToozError: LOG.exception(_LE('Error getting group membership info from ' 'coordination backend.')) return [] aodh-2.0.6/aodh/api/0000775000567000056710000000000013076064720015264 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/api/controllers/0000775000567000056710000000000013076064720017632 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/api/controllers/root.py0000664000567000056710000000347313076064372021201 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
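# An illustrative sketch (not emitted verbatim by this module) of the version
# discovery document that RootController.index() below assembles via
# version_descriptor(); the host URL "http://localhost:8042" is only an
# example value for pecan.request.host_url:
#
#   {"versions": {"values": [{
#       "id": "v2",
#       "links": [
#           {"href": "http://localhost:8042/v2", "rel": "self"},
#           {"href": "http://docs.openstack.org/",
#            "rel": "describedby", "type": "text/html"}],
#       "media-types": [
#           {"base": "application/json",
#            "type": "application/vnd.openstack.telemetry-v2+json"},
#           {"base": "application/xml",
#            "type": "application/vnd.openstack.telemetry-v2+xml"}],
#       "status": "stable",
#       "updated": "2013-02-13T00:00:00Z"}]}}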
import pecan from aodh.api.controllers.v2 import root as v2 MEDIA_TYPE_JSON = 'application/vnd.openstack.telemetry-%s+json' MEDIA_TYPE_XML = 'application/vnd.openstack.telemetry-%s+xml' class RootController(object): v2 = v2.V2Controller() @pecan.expose('json') def index(self): base_url = pecan.request.host_url available = [{'tag': 'v2', 'date': '2013-02-13T00:00:00Z', }] collected = [version_descriptor(base_url, v['tag'], v['date']) for v in available] versions = {'versions': {'values': collected}} return versions def version_descriptor(base_url, version, released_on): url = version_url(base_url, version) return { 'id': version, 'links': [ {'href': url, 'rel': 'self', }, {'href': 'http://docs.openstack.org/', 'rel': 'describedby', 'type': 'text/html', }], 'media-types': [ {'base': 'application/json', 'type': MEDIA_TYPE_JSON % version, }, {'base': 'application/xml', 'type': MEDIA_TYPE_XML % version, }], 'status': 'stable', 'updated': released_on, } def version_url(base_url, version_number): return '%s/%s' % (base_url, version_number) aodh-2.0.6/aodh/api/controllers/__init__.py0000664000567000056710000000000013076064371021733 0ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/api/controllers/v2/0000775000567000056710000000000013076064720020161 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/api/controllers/v2/capabilities.py0000664000567000056710000001012313076064372023164 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pecan from pecan import rest import six from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from aodh.api.controllers.v2 import base def _decode_unicode(input): """Decode the unicode of the message, and encode it into utf-8.""" if isinstance(input, dict): temp = {} # If the input data is a dict, create an equivalent dict with a # predictable insertion order to avoid inconsistencies in the # message signature computation for equivalent payloads modulo # ordering for key, value in sorted(six.iteritems(input)): temp[_decode_unicode(key)] = _decode_unicode(value) return temp elif isinstance(input, (tuple, list)): # When doing a pair of JSON encode/decode operations to the tuple, # the tuple would become list. So we have to generate the value as # list here. 
return [_decode_unicode(element) for element in input] elif isinstance(input, six.text_type): return input.encode('utf-8') else: return input def _recursive_keypairs(d, separator=':'): """Generator that produces sequence of keypairs for nested dictionaries.""" for name, value in sorted(six.iteritems(d)): if isinstance(value, dict): for subname, subvalue in _recursive_keypairs(value, separator): yield ('%s%s%s' % (name, separator, subname), subvalue) elif isinstance(value, (tuple, list)): yield name, _decode_unicode(value) else: yield name, value def _flatten_capabilities(capabilities): return dict((k, v) for k, v in _recursive_keypairs(capabilities)) class Capabilities(base.Base): """A representation of the API and storage capabilities. Usually constrained by restrictions imposed by the storage driver. """ api = {wtypes.text: bool} "A flattened dictionary of API capabilities" alarm_storage = {wtypes.text: bool} "A flattened dictionary of alarm storage capabilities" @classmethod def sample(cls): return cls( api=_flatten_capabilities({ 'alarms': {'query': {'simple': True, 'complex': True}, 'history': {'query': {'simple': True, 'complex': True}}}, }), alarm_storage=_flatten_capabilities( {'storage': {'production_ready': True}}), ) class CapabilitiesController(rest.RestController): """Manages capabilities queries.""" @wsme_pecan.wsexpose(Capabilities) def get(self): """Returns a flattened dictionary of API capabilities. Capabilities supported by the currently configured storage driver. """ # variation in API capabilities is effectively determined by # the lack of strict feature parity across storage drivers alarm_conn = pecan.request.alarm_storage_conn driver_capabilities = { 'alarms': alarm_conn.get_capabilities()['alarms'], } alarm_driver_perf = alarm_conn.get_storage_capabilities() return Capabilities(api=_flatten_capabilities(driver_capabilities), alarm_storage=_flatten_capabilities( alarm_driver_perf)) aodh-2.0.6/aodh/api/controllers/v2/alarms.py0000664000567000056710000007160113076064372022022 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
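# A minimal, runnable illustration of the capability flattening implemented
# by _recursive_keypairs()/_flatten_capabilities() above: a nested capability
# tree becomes a flat dict keyed by colon-joined paths.
def recursive_keypairs(d, separator=':'):
    for name, value in sorted(d.items()):
        if isinstance(value, dict):
            for subname, subvalue in recursive_keypairs(value, separator):
                yield '%s%s%s' % (name, separator, subname), subvalue
        else:
            yield name, value


nested = {'alarms': {'query': {'simple': True, 'complex': True},
                     'history': {'query': {'simple': True, 'complex': True}}}}
print(dict(recursive_keypairs(nested)))
# {'alarms:history:query:complex': True, 'alarms:history:query:simple': True,
#  'alarms:query:complex': True, 'alarms:query:simple': True}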
import datetime import itertools import json import uuid import croniter from oslo_config import cfg from oslo_log import log from oslo_utils import netutils from oslo_utils import timeutils import pecan from pecan import rest import pytz import six from six.moves.urllib import parse as urlparse from stevedore import extension import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan import aodh from aodh.api.controllers.v2.alarm_rules import combination from aodh.api.controllers.v2 import base from aodh.api.controllers.v2 import utils as v2_utils from aodh.api import rbac from aodh.i18n import _ from aodh import keystone_client from aodh import messaging from aodh import notifier from aodh.storage import models LOG = log.getLogger(__name__) ALARM_API_OPTS = [ cfg.IntOpt('user_alarm_quota', deprecated_group="alarm", help='Maximum number of alarms defined for a user.' ), cfg.IntOpt('project_alarm_quota', deprecated_group="alarm", help='Maximum number of alarms defined for a project.' ), cfg.IntOpt('alarm_max_actions', default=-1, deprecated_group="alarm", help='Maximum count of actions for each state of an alarm, ' 'non-positive number means no limit.'), ] state_kind = ["ok", "alarm", "insufficient data"] state_kind_enum = wtypes.Enum(str, *state_kind) severity_kind = ["low", "moderate", "critical"] severity_kind_enum = wtypes.Enum(str, *severity_kind) class OverQuota(base.ClientSideError): def __init__(self, data): d = { 'u': data.user_id, 'p': data.project_id } super(OverQuota, self).__init__( _("Alarm quota exceeded for user %(u)s on project %(p)s") % d, status_code=403) def is_over_quota(conn, project_id, user_id): """Returns False if an alarm is within the set quotas, True otherwise. :param conn: a backend connection object :param project_id: the ID of the project setting the alarm :param user_id: the ID of the user setting the alarm """ over_quota = False # Start by checking for user quota user_alarm_quota = pecan.request.cfg.user_alarm_quota if user_alarm_quota is not None: user_alarms = list(conn.get_alarms(user=user_id)) over_quota = len(user_alarms) >= user_alarm_quota # If the user quota isn't reached, we check for the project quota if not over_quota: project_alarm_quota = pecan.request.cfg.project_alarm_quota if project_alarm_quota is not None: project_alarms = list(conn.get_alarms(project=project_id)) over_quota = len(project_alarms) >= project_alarm_quota return over_quota class CronType(wtypes.UserType): """A user type that represents a cron format.""" basetype = six.string_types name = 'cron' @staticmethod def validate(value): # raises ValueError if invalid croniter.croniter(value) return value class AlarmTimeConstraint(base.Base): """Representation of a time constraint on an alarm.""" name = wsme.wsattr(wtypes.text, mandatory=True) "The name of the constraint" _description = None # provide a default def get_description(self): if not self._description: return ('Time constraint at %s lasting for %s seconds' % (self.start, self.duration)) return self._description def set_description(self, value): self._description = value description = wsme.wsproperty(wtypes.text, get_description, set_description) "The description of the constraint" start = wsme.wsattr(CronType(), mandatory=True) "Start point of the time constraint, in cron format" duration = wsme.wsattr(wtypes.IntegerType(minimum=0), mandatory=True) "How long the constraint should last, in seconds" timezone = wsme.wsattr(wtypes.text, default="") "Timezone of the constraint" def as_dict(self): return 
self.as_dict_from_keys(['name', 'description', 'start', 'duration', 'timezone']) @staticmethod def validate(tc): if tc.timezone: try: pytz.timezone(tc.timezone) except Exception: raise base.ClientSideError(_("Timezone %s is not valid") % tc.timezone) return tc @classmethod def sample(cls): return cls(name='SampleConstraint', description='nightly build every night at 23h for 3 hours', start='0 23 * * *', duration=10800, timezone='Europe/Ljubljana') ALARMS_RULES = extension.ExtensionManager("aodh.alarm.rule") LOG.debug("alarm rules plugin loaded: %s" % ",".join(ALARMS_RULES.names())) ACTIONS_SCHEMA = extension.ExtensionManager( notifier.AlarmNotifierService.NOTIFIER_EXTENSIONS_NAMESPACE).names() class Alarm(base.Base): """Representation of an alarm. .. note:: combination_rule and threshold_rule are mutually exclusive. The *type* of the alarm should be set to *threshold* or *combination* and the appropriate rule should be filled. """ alarm_id = wtypes.text "The UUID of the alarm" name = wsme.wsattr(wtypes.text, mandatory=True) "The name for the alarm" _description = None # provide a default def get_description(self): rule = getattr(self, '%s_rule' % self.type, None) if not self._description: if hasattr(rule, 'default_description'): return six.text_type(rule.default_description) return "%s alarm rule" % self.type return self._description def set_description(self, value): self._description = value description = wsme.wsproperty(wtypes.text, get_description, set_description) "The description of the alarm" enabled = wsme.wsattr(bool, default=True) "This alarm is enabled?" ok_actions = wsme.wsattr([wtypes.text], default=[]) "The actions to do when alarm state change to ok" alarm_actions = wsme.wsattr([wtypes.text], default=[]) "The actions to do when alarm state change to alarm" insufficient_data_actions = wsme.wsattr([wtypes.text], default=[]) "The actions to do when alarm state change to insufficient data" repeat_actions = wsme.wsattr(bool, default=False) "The actions should be re-triggered on each evaluation cycle" type = base.AdvEnum('type', str, *ALARMS_RULES.names(), mandatory=True) "Explicit type specifier to select which rule to follow below." 
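# A hedged, standalone sketch of the checks behind CronType and
# AlarmTimeConstraint.validate() above; it needs the croniter and pytz
# packages and reuses the values from AlarmTimeConstraint.sample().
import croniter
import pytz

constraint = {'name': 'SampleConstraint',
              'start': '0 23 * * *',    # cron format: every night at 23:00
              'duration': 10800,        # seconds, i.e. 3 hours
              'timezone': 'Europe/Ljubljana'}

# croniter raises ValueError for an invalid cron expression.
croniter.croniter(constraint['start'])

# pytz raises UnknownTimeZoneError for an unknown timezone name.
if constraint['timezone']:
    pytz.timezone(constraint['timezone'])

if constraint['duration'] < 0:
    raise ValueError('duration must be a non-negative number of seconds')

print('time constraint is valid')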
time_constraints = wtypes.wsattr([AlarmTimeConstraint], default=[]) """Describe time constraints for the alarm""" # These settings are ignored in the PUT or POST operations, but are # filled in for GET project_id = wtypes.text "The ID of the project or tenant that owns the alarm" user_id = wtypes.text "The ID of the user who created the alarm" timestamp = datetime.datetime "The date of the last alarm definition update" state = base.AdvEnum('state', str, *state_kind, default='insufficient data') "The state offset the alarm" state_timestamp = datetime.datetime "The date of the last alarm state changed" severity = base.AdvEnum('severity', str, *severity_kind, default='low') "The severity of the alarm" def __init__(self, rule=None, time_constraints=None, **kwargs): super(Alarm, self).__init__(**kwargs) if rule: setattr(self, '%s_rule' % self.type, ALARMS_RULES[self.type].plugin(**rule)) if time_constraints: self.time_constraints = [AlarmTimeConstraint(**tc) for tc in time_constraints] @staticmethod def validate(alarm): Alarm.check_rule(alarm) Alarm.check_alarm_actions(alarm) ALARMS_RULES[alarm.type].plugin.validate_alarm(alarm) if alarm.time_constraints: tc_names = [tc.name for tc in alarm.time_constraints] if len(tc_names) > len(set(tc_names)): error = _("Time constraint names must be " "unique for a given alarm.") raise base.ClientSideError(error) return alarm @staticmethod def check_rule(alarm): rule = '%s_rule' % alarm.type if getattr(alarm, rule) in (wtypes.Unset, None): error = _("%(rule)s must be set for %(type)s" " type alarm") % {"rule": rule, "type": alarm.type} raise base.ClientSideError(error) rule_set = None for ext in ALARMS_RULES: name = "%s_rule" % ext.name if getattr(alarm, name): if rule_set is None: rule_set = name else: error = _("%(rule1)s and %(rule2)s cannot be set at the " "same time") % {'rule1': rule_set, 'rule2': name} raise base.ClientSideError(error) @staticmethod def check_alarm_actions(alarm): max_actions = pecan.request.cfg.alarm_max_actions for state in state_kind: actions_name = state.replace(" ", "_") + '_actions' actions = getattr(alarm, actions_name) if not actions: continue action_set = set(actions) if len(actions) != len(action_set): LOG.info(_('duplicate actions are found: %s, ' 'remove duplicate ones') % actions) actions = list(action_set) setattr(alarm, actions_name, actions) if 0 < max_actions < len(actions): error = _('%(name)s count exceeds maximum value ' '%(maximum)d') % {"name": actions_name, "maximum": max_actions} raise base.ClientSideError(error) limited = rbac.get_limited_to_project(pecan.request.headers, pecan.request.enforcer) for action in actions: try: url = netutils.urlsplit(action) except Exception: error = _("Unable to parse action %s") % action raise base.ClientSideError(error) if url.scheme not in ACTIONS_SCHEMA: error = _("Unsupported action %s") % action raise base.ClientSideError(error) if limited and url.scheme in ('log', 'test'): error = _('You are not authorized to create ' 'action: %s') % action raise base.ClientSideError(error, status_code=401) @classmethod def sample(cls): return cls(alarm_id=None, name="SwiftObjectAlarm", description="An alarm", type='combination', time_constraints=[AlarmTimeConstraint.sample().as_dict()], user_id="c96c887c216949acbdfbd8b494863567", project_id="c96c887c216949acbdfbd8b494863567", enabled=True, timestamp=datetime.datetime.utcnow(), state="ok", severity="moderate", state_timestamp=datetime.datetime.utcnow(), ok_actions=["http://site:8000/ok"], alarm_actions=["http://site:8000/alarm"], 
insufficient_data_actions=["http://site:8000/nodata"], repeat_actions=False, combination_rule=combination.AlarmCombinationRule.sample(), ) def as_dict(self, db_model): d = super(Alarm, self).as_dict(db_model) for k in d: if k.endswith('_rule'): del d[k] rule = getattr(self, "%s_rule" % self.type) d['rule'] = rule if isinstance(rule, dict) else rule.as_dict() if self.time_constraints: d['time_constraints'] = [tc.as_dict() for tc in self.time_constraints] return d @staticmethod def _is_trust_url(url): return url.scheme in ('trust+http', 'trust+https') def update_actions(self, old_alarm=None): trustor_user_id = pecan.request.headers.get('X-User-Id') trustor_project_id = pecan.request.headers.get('X-Project-Id') roles = pecan.request.headers.get('X-Roles', '') if roles: roles = roles.split(',') else: roles = [] auth_plugin = pecan.request.environ.get('keystone.token_auth') for actions in (self.ok_actions, self.alarm_actions, self.insufficient_data_actions): if actions is not None: for index, action in enumerate(actions[:]): url = netutils.urlsplit(action) if self._is_trust_url(url): if '@' not in url.netloc: # We have a trust action without a trust ID, # create it trust_id = keystone_client.create_trust_id( pecan.request.cfg, trustor_user_id, trustor_project_id, roles, auth_plugin) netloc = '%s:delete@%s' % (trust_id, url.netloc) url = list(url) url[1] = netloc actions[index] = urlparse.urlunsplit(url) if old_alarm: new_actions = list(itertools.chain( self.ok_actions or [], self.alarm_actions or [], self.insufficient_data_actions or [])) for action in itertools.chain( old_alarm.ok_actions or [], old_alarm.alarm_actions or [], old_alarm.insufficient_data_actions or []): if action not in new_actions: self.delete_trust(action) def delete_actions(self): for action in itertools.chain(self.ok_actions or [], self.alarm_actions or [], self.insufficient_data_actions or []): self.delete_trust(action) def delete_trust(self, action): auth_plugin = pecan.request.environ.get('keystone.token_auth') url = netutils.urlsplit(action) if self._is_trust_url(url) and url.password: keystone_client.delete_trust_id(url.username, auth_plugin) Alarm.add_attributes(**{"%s_rule" % ext.name: ext.plugin for ext in ALARMS_RULES}) class AlarmChange(base.Base): """Representation of an event in an alarm's history.""" event_id = wtypes.text "The UUID of the change event" alarm_id = wtypes.text "The UUID of the alarm" type = wtypes.Enum(str, 'creation', 'rule change', 'state transition', 'deletion') "The type of change" detail = wtypes.text "JSON fragment describing change" project_id = wtypes.text "The project ID of the initiating identity" user_id = wtypes.text "The user ID of the initiating identity" on_behalf_of = wtypes.text "The tenant on behalf of which the change is being made" timestamp = datetime.datetime "The time/date of the alarm change" @classmethod def sample(cls): return cls(alarm_id='e8ff32f772a44a478182c3fe1f7cad6a', type='rule change', detail='{"threshold": 42.0, "evaluation_periods": 4}', user_id="3e5d11fda79448ac99ccefb20be187ca", project_id="b6f16144010811e387e4de429e99ee8c", on_behalf_of="92159030020611e3b26dde429e99ee8c", timestamp=datetime.datetime.utcnow(), ) def _send_notification(event, payload): notification = event.replace(" ", "_") notification = "alarm.%s" % notification transport = messaging.get_transport(pecan.request.cfg) notifier = messaging.get_notifier(transport, publisher_id="aodh.api") # FIXME(sileht): perhaps we need to copy some infos from the # pecan request headers like nova does 
notifier.info({}, notification, payload) def stringify_timestamps(data): """Stringify any datetimes in given dict.""" return dict((k, v.isoformat() if isinstance(v, datetime.datetime) else v) for (k, v) in six.iteritems(data)) class AlarmController(rest.RestController): """Manages operations on a single alarm.""" _custom_actions = { 'history': ['GET'], 'state': ['PUT', 'GET'], } def __init__(self, alarm_id): pecan.request.context['alarm_id'] = alarm_id self._id = alarm_id def _alarm(self, rbac_directive): self.conn = pecan.request.alarm_storage_conn # TODO(sileht): We should be able to relax this since we # pass the alarm object to the enforcer. auth_project = rbac.get_limited_to_project(pecan.request.headers, pecan.request.enforcer) alarms = list(self.conn.get_alarms(alarm_id=self._id, project=auth_project)) if not alarms: raise base.AlarmNotFound(alarm=self._id, auth_project=auth_project) alarm = alarms[0] target = {'user_id': alarm.user_id, 'project_id': alarm.project_id} rbac.enforce(rbac_directive, pecan.request.headers, pecan.request.enforcer, target) return alarm def _record_change(self, data, now, on_behalf_of=None, type=None): if not pecan.request.cfg.record_history: return if not data: return type = type or models.AlarmChange.RULE_CHANGE scrubbed_data = stringify_timestamps(data) detail = json.dumps(scrubbed_data) user_id = pecan.request.headers.get('X-User-Id') project_id = pecan.request.headers.get('X-Project-Id') on_behalf_of = on_behalf_of or project_id severity = scrubbed_data.get('severity') payload = dict(event_id=str(uuid.uuid4()), alarm_id=self._id, type=type, detail=detail, user_id=user_id, project_id=project_id, on_behalf_of=on_behalf_of, timestamp=now, severity=severity) try: self.conn.record_alarm_change(payload) except aodh.NotImplementedError: pass # Revert to the pre-json'ed details ... payload['detail'] = scrubbed_data _send_notification(type, payload) @wsme_pecan.wsexpose(Alarm) def get(self): """Return this alarm.""" return Alarm.from_db_model(self._alarm('get_alarm')) @wsme_pecan.wsexpose(Alarm, body=Alarm) def put(self, data): """Modify this alarm. :param data: an alarm within the request body. """ # Ensure alarm exists alarm_in = self._alarm('change_alarm') now = timeutils.utcnow() data.alarm_id = self._id user, project = rbac.get_limited_to(pecan.request.headers, pecan.request.enforcer) if user: data.user_id = user elif data.user_id == wtypes.Unset: data.user_id = alarm_in.user_id if project: data.project_id = project elif data.project_id == wtypes.Unset: data.project_id = alarm_in.project_id data.timestamp = now if alarm_in.state != data.state: data.state_timestamp = now else: data.state_timestamp = alarm_in.state_timestamp # make sure alarms are unique by name per project. 
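# A small, self-contained illustration of the conventions used by
# _record_change()/_send_notification() above: datetimes in the change
# detail are stringified to ISO 8601, and the event name becomes the
# notification topic "alarm.<event>".
import datetime


def _stringify_timestamps(data):
    return dict((k, v.isoformat() if isinstance(v, datetime.datetime) else v)
                for k, v in data.items())


change = {'state': 'alarm',
          'state_timestamp': datetime.datetime(2016, 4, 1, 12, 30, 0)}
print(_stringify_timestamps(change))
# {'state': 'alarm', 'state_timestamp': '2016-04-01T12:30:00'}

event = 'state transition'
print('alarm.%s' % event.replace(' ', '_'))   # alarm.state_transition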
if alarm_in.name != data.name: alarms = list(self.conn.get_alarms(name=data.name, project=data.project_id)) if alarms: raise base.ClientSideError( _("Alarm with name=%s exists") % data.name, status_code=409) ALARMS_RULES[data.type].plugin.update_hook(data) old_data = Alarm.from_db_model(alarm_in) old_alarm = old_data.as_dict(models.Alarm) data.update_actions(old_data) updated_alarm = data.as_dict(models.Alarm) try: alarm_in = models.Alarm(**updated_alarm) except Exception: LOG.exception(_("Error while putting alarm: %s") % updated_alarm) raise base.ClientSideError(_("Alarm incorrect")) alarm = self.conn.update_alarm(alarm_in) change = dict((k, v) for k, v in updated_alarm.items() if v != old_alarm[k] and k not in ['timestamp', 'state_timestamp']) self._record_change(change, now, on_behalf_of=alarm.project_id) return Alarm.from_db_model(alarm) @wsme_pecan.wsexpose(None, status_code=204) def delete(self): """Delete this alarm.""" # ensure alarm exists before deleting alarm = self._alarm('delete_alarm') self.conn.delete_alarm(alarm.alarm_id) alarm_object = Alarm.from_db_model(alarm) alarm_object.delete_actions() @wsme_pecan.wsexpose([AlarmChange], [base.Query]) def history(self, q=None): """Assembles the alarm history requested. :param q: Filter rules for the changes to be described. """ target = rbac.target_from_segregation_rule( pecan.request.headers, pecan.request.enforcer) rbac.enforce('alarm_history', pecan.request.headers, pecan.request.enforcer, target) q = q or [] # allow history to be returned for deleted alarms, but scope changes # returned to those carried out on behalf of the auth'd tenant, to # avoid inappropriate cross-tenant visibility of alarm history auth_project = rbac.get_limited_to_project(pecan.request.headers, pecan.request.enforcer) conn = pecan.request.alarm_storage_conn kwargs = v2_utils.query_to_kwargs( q, conn.get_alarm_changes, ['on_behalf_of', 'alarm_id']) return [AlarmChange.from_db_model(ac) for ac in conn.get_alarm_changes(self._id, auth_project, **kwargs)] @wsme.validate(state_kind_enum) @wsme_pecan.wsexpose(state_kind_enum, body=state_kind_enum) def put_state(self, state): """Set the state of this alarm. :param state: an alarm state within the request body. 
""" alarm = self._alarm('change_alarm_state') # note(sileht): body are not validated by wsme # Workaround for https://bugs.launchpad.net/wsme/+bug/1227229 if state not in state_kind: raise base.ClientSideError(_("state invalid")) now = timeutils.utcnow() alarm.state = state alarm.state_timestamp = now alarm = self.conn.update_alarm(alarm) change = {'state': alarm.state} self._record_change(change, now, on_behalf_of=alarm.project_id, type=models.AlarmChange.STATE_TRANSITION) return alarm.state @wsme_pecan.wsexpose(state_kind_enum) def get_state(self): """Get the state of this alarm.""" return self._alarm('get_alarm_state').state class AlarmsController(rest.RestController): """Manages operations on the alarms collection.""" @pecan.expose() def _lookup(self, alarm_id, *remainder): return AlarmController(alarm_id), remainder @staticmethod def _record_creation(conn, data, alarm_id, now): if not pecan.request.cfg.record_history: return type = models.AlarmChange.CREATION scrubbed_data = stringify_timestamps(data) detail = json.dumps(scrubbed_data) user_id = pecan.request.headers.get('X-User-Id') project_id = pecan.request.headers.get('X-Project-Id') severity = scrubbed_data.get('severity') payload = dict(event_id=str(uuid.uuid4()), alarm_id=alarm_id, type=type, detail=detail, user_id=user_id, project_id=project_id, on_behalf_of=project_id, timestamp=now, severity=severity) try: conn.record_alarm_change(payload) except aodh.NotImplementedError: pass # Revert to the pre-json'ed details ... payload['detail'] = scrubbed_data _send_notification(type, payload) @wsme_pecan.wsexpose(Alarm, body=Alarm, status_code=201) def post(self, data): """Create a new alarm. :param data: an alarm within the request body. """ rbac.enforce('create_alarm', pecan.request.headers, pecan.request.enforcer, {}) conn = pecan.request.alarm_storage_conn now = timeutils.utcnow() data.alarm_id = str(uuid.uuid4()) user_limit, project_limit = rbac.get_limited_to(pecan.request.headers, pecan.request.enforcer) def _set_ownership(aspect, owner_limitation, header): attr = '%s_id' % aspect requested_owner = getattr(data, attr) explicit_owner = requested_owner != wtypes.Unset caller = pecan.request.headers.get(header) if (owner_limitation and explicit_owner and requested_owner != caller): raise base.ProjectNotAuthorized(requested_owner, aspect) actual_owner = (owner_limitation or requested_owner if explicit_owner else caller) setattr(data, attr, actual_owner) _set_ownership('user', user_limit, 'X-User-Id') _set_ownership('project', project_limit, 'X-Project-Id') # Check if there's room for one more alarm if is_over_quota(conn, data.project_id, data.user_id): raise OverQuota(data) data.timestamp = now data.state_timestamp = now ALARMS_RULES[data.type].plugin.create_hook(data) change = data.as_dict(models.Alarm) data.update_actions() # make sure alarms are unique by name per project. alarms = list(conn.get_alarms(name=data.name, project=data.project_id)) if alarms: raise base.ClientSideError( _("Alarm with name='%s' exists") % data.name, status_code=409) try: alarm_in = models.Alarm(**change) except Exception: LOG.exception(_("Error while posting alarm: %s") % change) raise base.ClientSideError(_("Alarm incorrect")) alarm = conn.create_alarm(alarm_in) self._record_creation(conn, change, alarm.alarm_id, now) v2_utils.set_resp_location_hdr("/v2/alarms/" + alarm.alarm_id) return Alarm.from_db_model(alarm) @wsme_pecan.wsexpose([Alarm], [base.Query]) def get_all(self, q=None): """Return all alarms, based on the query provided. 
:param q: Filter rules for the alarms to be returned. """ target = rbac.target_from_segregation_rule( pecan.request.headers, pecan.request.enforcer) rbac.enforce('get_alarms', pecan.request.headers, pecan.request.enforcer, target) q = q or [] # Timestamp is not supported field for Simple Alarm queries kwargs = v2_utils.query_to_kwargs( q, pecan.request.alarm_storage_conn.get_alarms, allow_timestamps=False) return [Alarm.from_db_model(m) for m in pecan.request.alarm_storage_conn.get_alarms(**kwargs)] aodh-2.0.6/aodh/api/controllers/v2/utils.py0000664000567000056710000003004013076064372021673 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import datetime import inspect from oslo_utils import timeutils import pecan import six from six.moves.urllib import parse as urllib_parse import wsme from aodh.api.controllers.v2 import base from aodh.api import rbac def get_auth_project(on_behalf_of=None): # when an alarm is created by an admin on behalf of another tenant # we must ensure for: # - threshold alarm, that an implicit query constraint on project_id is # added so that admin-level visibility on statistics is not leaked # - combination alarm, that alarm ids verification is scoped to # alarms owned by the alarm project. # hence for null auth_project (indicating admin-ness) we check if # the creating tenant differs from the tenant on whose behalf the # alarm is being created auth_project = rbac.get_limited_to_project(pecan.request.headers, pecan.request.enforcer) created_by = pecan.request.headers.get('X-Project-Id') is_admin = auth_project is None if is_admin and on_behalf_of != created_by: auth_project = on_behalf_of return auth_project def sanitize_query(query, db_func, on_behalf_of=None): """Check the query. See if: 1) the request is coming from admin - then allow full visibility 2) non-admin - make sure that the query includes the requester's project. """ q = copy.copy(query) auth_project = get_auth_project(on_behalf_of) if auth_project: _verify_query_segregation(q, auth_project) proj_q = [i for i in q if i.field == 'project_id'] valid_keys = inspect.getargspec(db_func)[0] if not proj_q and 'on_behalf_of' not in valid_keys: # The user is restricted, but they didn't specify a project # so add it for them. 
q.append(base.Query(field='project_id', op='eq', value=auth_project)) return q def _verify_query_segregation(query, auth_project=None): """Ensure non-admin queries are not constrained to another project.""" auth_project = (auth_project or rbac.get_limited_to_project(pecan.request.headers, pecan.request.enforcer)) if not auth_project: return for q in query: if q.field in ('project', 'project_id') and auth_project != q.value: raise base.ProjectNotAuthorized(q.value) def validate_query(query, db_func, internal_keys=None, allow_timestamps=True): """Validates the syntax of the query and verifies the query. Verification check if the query request is authorized for the included project. :param query: Query expression that should be validated :param db_func: the function on the storage level, of which arguments will form the valid_keys list, which defines the valid fields for a query expression :param internal_keys: internally used field names, that should not be used for querying :param allow_timestamps: defines whether the timestamp-based constraint is applicable for this query or not :raises InvalidInput: if an operator is not supported for a given field :raises InvalidInput: if timestamp constraints are allowed, but search_offset was included without timestamp constraint :raises: UnknownArgument: if a field name is not a timestamp field, nor in the list of valid keys """ internal_keys = internal_keys or [] _verify_query_segregation(query) valid_keys = inspect.getargspec(db_func)[0] if 'alarm_type' in valid_keys: valid_keys.remove('alarm_type') valid_keys.append('type') internal_timestamp_keys = ['end_timestamp', 'start_timestamp', 'end_timestamp_op', 'start_timestamp_op'] if 'start_timestamp' in valid_keys: internal_keys += internal_timestamp_keys valid_keys += ['timestamp', 'search_offset'] internal_keys.append('self') internal_keys.append('metaquery') valid_keys = set(valid_keys) - set(internal_keys) translation = {'user_id': 'user', 'project_id': 'project', 'resource_id': 'resource'} has_timestamp_query = _validate_timestamp_fields(query, 'timestamp', ('lt', 'le', 'gt', 'ge'), allow_timestamps) has_search_offset_query = _validate_timestamp_fields(query, 'search_offset', 'eq', allow_timestamps) if has_search_offset_query and not has_timestamp_query: raise wsme.exc.InvalidInput('field', 'search_offset', "search_offset cannot be used without " + "timestamp") def _is_field_metadata(field): return (field.startswith('metadata.') or field.startswith('resource_metadata.')) for i in query: if i.field not in ('timestamp', 'search_offset'): key = translation.get(i.field, i.field) operator = i.op if key in valid_keys or _is_field_metadata(i.field): if operator == 'eq': if key == 'enabled': i._get_value_as_type('boolean') elif _is_field_metadata(key): i._get_value_as_type() else: raise wsme.exc.InvalidInput('op', i.op, 'unimplemented operator for ' '%s' % i.field) else: msg = ("unrecognized field in query: %s, " "valid keys: %s") % (query, sorted(valid_keys)) raise wsme.exc.UnknownArgument(key, msg) def _validate_timestamp_fields(query, field_name, operator_list, allow_timestamps): """Validates the timestamp related constraints in a query if there are any. 
:param query: query expression that may contain the timestamp fields :param field_name: timestamp name, which should be checked (timestamp, search_offset) :param operator_list: list of operators that are supported for that timestamp, which was specified in the parameter field_name :param allow_timestamps: defines whether the timestamp-based constraint is applicable to this query or not :returns: True, if there was a timestamp constraint, containing a timestamp field named as defined in field_name, in the query and it was allowed and syntactically correct. :returns: False, if there wasn't timestamp constraint, containing a timestamp field named as defined in field_name, in the query :raises InvalidInput: if an operator is unsupported for a given timestamp field :raises UnknownArgument: if the timestamp constraint is not allowed in the query """ for item in query: if item.field == field_name: # If *timestamp* or *search_offset* field was specified in the # query, but timestamp is not supported on that resource, on # which the query was invoked, then raise an exception. if not allow_timestamps: raise wsme.exc.UnknownArgument(field_name, "not valid for " + "this resource") if item.op not in operator_list: raise wsme.exc.InvalidInput('op', item.op, 'unimplemented operator for %s' % item.field) return True return False def query_to_kwargs(query, db_func, internal_keys=None, allow_timestamps=True): validate_query(query, db_func, internal_keys=internal_keys, allow_timestamps=allow_timestamps) query = sanitize_query(query, db_func) translation = {'user_id': 'user', 'project_id': 'project', 'resource_id': 'resource', 'type': 'alarm_type'} stamp = {} kwargs = {} for i in query: if i.field == 'timestamp': if i.op in ('lt', 'le'): stamp['end_timestamp'] = i.value stamp['end_timestamp_op'] = i.op elif i.op in ('gt', 'ge'): stamp['start_timestamp'] = i.value stamp['start_timestamp_op'] = i.op else: if i.op == 'eq': if i.field == 'search_offset': stamp['search_offset'] = i.value elif i.field == 'enabled': kwargs[i.field] = i._get_value_as_type('boolean') else: key = translation.get(i.field, i.field) kwargs[key] = i.value if stamp: kwargs.update(_get_query_timestamps(stamp)) return kwargs def _get_query_timestamps(args=None): """Return any optional timestamp information in the request. Determine the desired range, if any, from the GET arguments. Set up the query range using the specified offset. [query_start ... start_timestamp ... end_timestamp ... 
query_end] Returns a dictionary containing: start_timestamp: First timestamp to use for query start_timestamp_op: First timestamp operator to use for query end_timestamp: Final timestamp to use for query end_timestamp_op: Final timestamp operator to use for query """ if args is None: return {} search_offset = int(args.get('search_offset', 0)) def _parse_timestamp(timestamp): if not timestamp: return None try: iso_timestamp = timeutils.parse_isotime(timestamp) iso_timestamp = iso_timestamp.replace(tzinfo=None) except ValueError: raise wsme.exc.InvalidInput('timestamp', timestamp, 'invalid timestamp format') return iso_timestamp start_timestamp = _parse_timestamp(args.get('start_timestamp')) end_timestamp = _parse_timestamp(args.get('end_timestamp')) start_timestamp = start_timestamp - datetime.timedelta( minutes=search_offset) if start_timestamp else None end_timestamp = end_timestamp + datetime.timedelta( minutes=search_offset) if end_timestamp else None return {'start_timestamp': start_timestamp, 'end_timestamp': end_timestamp, 'start_timestamp_op': args.get('start_timestamp_op'), 'end_timestamp_op': args.get('end_timestamp_op')} def set_resp_location_hdr(location): location = '%s%s' % (pecan.request.script_name, location) # NOTE(sileht): according the pep-3333 the headers must be # str in py2 and py3 even this is not the same thing in both # version # see: http://legacy.python.org/dev/peps/pep-3333/#unicode-issues if six.PY2 and isinstance(location, six.text_type): location = location.encode('utf-8') location = urllib_parse.quote(location) pecan.response.headers['Location'] = location aodh-2.0.6/aodh/api/controllers/v2/root.py0000664000567000056710000000220413076064371021516 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from aodh.api.controllers.v2 import alarms from aodh.api.controllers.v2 import capabilities from aodh.api.controllers.v2 import query class V2Controller(object): """Version 2 API controller root.""" alarms = alarms.AlarmsController() query = query.QueryController() capabilities = capabilities.CapabilitiesController() aodh-2.0.6/aodh/api/controllers/v2/query.py0000664000567000056710000003544713076064372021720 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
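# A standalone sketch of the time-window arithmetic in
# _get_query_timestamps() above: timestamp constraints become start/end
# bounds, optionally widened by search_offset minutes.
import datetime


def query_time_window(start=None, end=None, search_offset=0):
    delta = datetime.timedelta(minutes=search_offset)
    return {'start_timestamp': start - delta if start else None,
            'end_timestamp': end + delta if end else None}


window = query_time_window(start=datetime.datetime(2016, 4, 1, 12, 0, 0),
                           end=datetime.datetime(2016, 4, 1, 13, 0, 0),
                           search_offset=5)
print(window['start_timestamp'])  # 2016-04-01 11:55:00
print(window['end_timestamp'])    # 2016-04-01 13:05:00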
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import jsonschema from oslo_log import log from oslo_utils import timeutils import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from aodh.api.controllers.v2 import alarms from aodh.api.controllers.v2 import base from aodh.api import rbac from aodh.i18n import _ from aodh.storage import models LOG = log.getLogger(__name__) class ComplexQuery(base.Base): """Holds a sample query encoded in json.""" filter = wtypes.text "The filter expression encoded in json." orderby = wtypes.text "List of single-element dicts for specifing the ordering of the results." limit = int "The maximum number of results to be returned." @classmethod def sample(cls): return cls(filter='{"and": [{"and": [{"=": ' + '{"counter_name": "cpu_util"}}, ' + '{">": {"counter_volume": 0.23}}, ' + '{"<": {"counter_volume": 0.26}}]}, ' + '{"or": [{"and": [{">": ' + '{"timestamp": "2013-12-01T18:00:00"}}, ' + '{"<": ' + '{"timestamp": "2013-12-01T18:15:00"}}]}, ' + '{"and": [{">": ' + '{"timestamp": "2013-12-01T18:30:00"}}, ' + '{"<": ' + '{"timestamp": "2013-12-01T18:45:00"}}]}]}]}', orderby='[{"counter_volume": "ASC"}, ' + '{"timestamp": "DESC"}]', limit=42 ) def _list_to_regexp(items, regexp_prefix=""): regexp = ["^%s$" % item for item in items] regexp = regexp_prefix + "|".join(regexp) return regexp class ValidatedComplexQuery(object): complex_operators = ["and", "or"] order_directions = ["asc", "desc"] simple_ops = ["=", "!=", "<", ">", "<=", "=<", ">=", "=>", "=~"] regexp_prefix = "(?i)" complex_ops = _list_to_regexp(complex_operators, regexp_prefix) simple_ops = _list_to_regexp(simple_ops, regexp_prefix) order_directions = _list_to_regexp(order_directions, regexp_prefix) timestamp_fields = ["timestamp", "state_timestamp"] def __init__(self, query, db_model, additional_name_mapping=None, metadata_allowed=False): additional_name_mapping = additional_name_mapping or {} self.name_mapping = {"user": "user_id", "project": "project_id"} self.name_mapping.update(additional_name_mapping) valid_keys = db_model.get_field_names() valid_keys = list(valid_keys) + list(self.name_mapping.keys()) valid_fields = _list_to_regexp(valid_keys) if metadata_allowed: valid_filter_fields = valid_fields + "|^metadata\.[\S]+$" else: valid_filter_fields = valid_fields schema_value = { "oneOf": [{"type": "string"}, {"type": "number"}, {"type": "boolean"}], "minProperties": 1, "maxProperties": 1} schema_value_in = { "type": "array", "items": {"oneOf": [{"type": "string"}, {"type": "number"}]}, "minItems": 1} schema_field = { "type": "object", "patternProperties": {valid_filter_fields: schema_value}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} schema_field_in = { "type": "object", "patternProperties": {valid_filter_fields: schema_value_in}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} schema_leaf_in = { "type": "object", "patternProperties": {"(?i)^in$": schema_field_in}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} schema_leaf_simple_ops = { "type": "object", "patternProperties": {self.simple_ops: schema_field}, 
"additionalProperties": False, "minProperties": 1, "maxProperties": 1} schema_and_or_array = { "type": "array", "items": {"$ref": "#"}, "minItems": 2} schema_and_or = { "type": "object", "patternProperties": {self.complex_ops: schema_and_or_array}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} schema_not = { "type": "object", "patternProperties": {"(?i)^not$": {"$ref": "#"}}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1} self.schema = { "oneOf": [{"$ref": "#/definitions/leaf_simple_ops"}, {"$ref": "#/definitions/leaf_in"}, {"$ref": "#/definitions/and_or"}, {"$ref": "#/definitions/not"}], "minProperties": 1, "maxProperties": 1, "definitions": {"leaf_simple_ops": schema_leaf_simple_ops, "leaf_in": schema_leaf_in, "and_or": schema_and_or, "not": schema_not}} self.orderby_schema = { "type": "array", "items": { "type": "object", "patternProperties": {valid_fields: {"type": "string", "pattern": self.order_directions}}, "additionalProperties": False, "minProperties": 1, "maxProperties": 1}} self.original_query = query def validate(self, visibility_field): """Validates the query content and does the necessary conversions.""" if self.original_query.filter is wtypes.Unset: self.filter_expr = None else: try: self.filter_expr = json.loads(self.original_query.filter) self._validate_filter(self.filter_expr) except (ValueError, jsonschema.exceptions.ValidationError) as e: raise base.ClientSideError( _("Filter expression not valid: %s") % e.message) self._replace_isotime_with_datetime(self.filter_expr) self._convert_operator_to_lower_case(self.filter_expr) self._normalize_field_names_for_db_model(self.filter_expr) self._force_visibility(visibility_field) if self.original_query.orderby is wtypes.Unset: self.orderby = None else: try: self.orderby = json.loads(self.original_query.orderby) self._validate_orderby(self.orderby) except (ValueError, jsonschema.exceptions.ValidationError) as e: raise base.ClientSideError( _("Order-by expression not valid: %s") % e) self._convert_orderby_to_lower_case(self.orderby) self._normalize_field_names_in_orderby(self.orderby) if self.original_query.limit is wtypes.Unset: self.limit = None else: self.limit = self.original_query.limit if self.limit is not None and self.limit <= 0: msg = _('Limit should be positive') raise base.ClientSideError(msg) @staticmethod def lowercase_values(mapping): """Converts the values in the mapping dict to lowercase.""" items = mapping.items() for key, value in items: mapping[key] = value.lower() def _convert_orderby_to_lower_case(self, orderby): for orderby_field in orderby: self.lowercase_values(orderby_field) def _normalize_field_names_in_orderby(self, orderby): for orderby_field in orderby: self._replace_field_names(orderby_field) def _traverse_postorder(self, tree, visitor): op = list(tree.keys())[0] if op.lower() in self.complex_operators: for i, operand in enumerate(tree[op]): self._traverse_postorder(operand, visitor) if op.lower() == "not": self._traverse_postorder(tree[op], visitor) visitor(tree) def _check_cross_project_references(self, own_project_id, visibility_field): """Do not allow other than own_project_id.""" def check_project_id(subfilter): op, value = list(subfilter.items())[0] if (op.lower() not in self.complex_operators and list(value.keys())[0] == visibility_field and value[visibility_field] != own_project_id): raise base.ProjectNotAuthorized(value[visibility_field]) self._traverse_postorder(self.filter_expr, check_project_id) def _force_visibility(self, visibility_field): 
"""Force visibility field. If the tenant is not admin insert an extra "and =" clause to the query. """ authorized_project = rbac.get_limited_to_project( pecan.request.headers, pecan.request.enforcer) is_admin = authorized_project is None if not is_admin: self._restrict_to_project(authorized_project, visibility_field) self._check_cross_project_references(authorized_project, visibility_field) def _restrict_to_project(self, project_id, visibility_field): restriction = {"=": {visibility_field: project_id}} if self.filter_expr is None: self.filter_expr = restriction else: self.filter_expr = {"and": [restriction, self.filter_expr]} def _replace_isotime_with_datetime(self, filter_expr): def replace_isotime(subfilter): op, value = list(subfilter.items())[0] if op.lower() not in self.complex_operators: field = list(value.keys())[0] if field in self.timestamp_fields: date_time = self._convert_to_datetime(subfilter[op][field]) subfilter[op][field] = date_time self._traverse_postorder(filter_expr, replace_isotime) def _normalize_field_names_for_db_model(self, filter_expr): def _normalize_field_names(subfilter): op, value = list(subfilter.items())[0] if op.lower() not in self.complex_operators: self._replace_field_names(value) self._traverse_postorder(filter_expr, _normalize_field_names) def _replace_field_names(self, subfilter): field, value = list(subfilter.items())[0] if field in self.name_mapping: del subfilter[field] subfilter[self.name_mapping[field]] = value if field.startswith("metadata."): del subfilter[field] subfilter["resource_" + field] = value @staticmethod def lowercase_keys(mapping): """Converts the values of the keys in mapping to lowercase.""" items = mapping.items() for key, value in items: del mapping[key] mapping[key.lower()] = value def _convert_operator_to_lower_case(self, filter_expr): self._traverse_postorder(filter_expr, self.lowercase_keys) @staticmethod def _convert_to_datetime(isotime): try: date_time = timeutils.parse_isotime(isotime) date_time = date_time.replace(tzinfo=None) return date_time except ValueError: LOG.exception(_("String %s is not a valid isotime") % isotime) msg = _('Failed to parse the timestamp value %s') % isotime raise base.ClientSideError(msg) def _validate_filter(self, filter_expr): jsonschema.validate(filter_expr, self.schema) def _validate_orderby(self, orderby_expr): jsonschema.validate(orderby_expr, self.orderby_schema) class QueryAlarmHistoryController(rest.RestController): """Provides complex query possibilities for alarm history.""" @wsme_pecan.wsexpose([alarms.AlarmChange], body=ComplexQuery) def post(self, body): """Define query for retrieving AlarmChange data. :param body: Query rules for the alarm history to be returned. """ target = rbac.target_from_segregation_rule( pecan.request.headers, pecan.request.enforcer) rbac.enforce('query_alarm_history', pecan.request.headers, pecan.request.enforcer, target) query = ValidatedComplexQuery(body, models.AlarmChange) query.validate(visibility_field="on_behalf_of") conn = pecan.request.alarm_storage_conn return [alarms.AlarmChange.from_db_model(s) for s in conn.query_alarm_history(query.filter_expr, query.orderby, query.limit)] class QueryAlarmsController(rest.RestController): """Provides complex query possibilities for alarms.""" history = QueryAlarmHistoryController() @wsme_pecan.wsexpose([alarms.Alarm], body=ComplexQuery) def post(self, body): """Define query for retrieving Alarm data. :param body: Query rules for the alarms to be returned. 
""" target = rbac.target_from_segregation_rule( pecan.request.headers, pecan.request.enforcer) rbac.enforce('query_alarm', pecan.request.headers, pecan.request.enforcer, target) query = ValidatedComplexQuery(body, models.Alarm) query.validate(visibility_field="project_id") conn = pecan.request.alarm_storage_conn return [alarms.Alarm.from_db_model(s) for s in conn.query_alarms(query.filter_expr, query.orderby, query.limit)] class QueryController(rest.RestController): alarms = QueryAlarmsController() aodh-2.0.6/aodh/api/controllers/v2/alarm_rules/0000775000567000056710000000000013076064720022467 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/api/controllers/v2/alarm_rules/threshold.py0000664000567000056710000001116313076064372025042 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import wsme from wsme import types as wtypes from aodh.api.controllers.v2 import base from aodh.api.controllers.v2 import utils as v2_utils from aodh.i18n import _ from aodh import storage class AlarmThresholdRule(base.AlarmRule): """Alarm Threshold Rule Describe when to trigger the alarm based on computed statistics """ meter_name = wsme.wsattr(wtypes.text, mandatory=True) "The name of the meter" # FIXME(sileht): default doesn't work # workaround: default is set in validate method query = wsme.wsattr([base.Query], default=[]) """The query to find the data for computing statistics. Ownership settings are automatically included based on the Alarm owner. """ period = wsme.wsattr(wtypes.IntegerType(minimum=1), default=60) "The time range in seconds over which query" comparison_operator = base.AdvEnum('comparison_operator', str, 'lt', 'le', 'eq', 'ne', 'ge', 'gt', default='eq') "The comparison against the alarm threshold" threshold = wsme.wsattr(float, mandatory=True) "The threshold of the alarm" statistic = base.AdvEnum('statistic', str, 'max', 'min', 'avg', 'sum', 'count', default='avg') "The statistic to compare to the threshold" evaluation_periods = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1) "The number of historical periods to evaluate the threshold" exclude_outliers = wsme.wsattr(bool, default=False) "Whether datapoints with anomalously low sample counts are excluded" def __init__(self, query=None, **kwargs): query = [base.Query(**q) for q in query] if query else [] super(AlarmThresholdRule, self).__init__(query=query, **kwargs) @staticmethod def validate(threshold_rule): # note(sileht): wsme default doesn't work in some case # workaround for https://bugs.launchpad.net/wsme/+bug/1227039 if not threshold_rule.query: threshold_rule.query = [] # Timestamp is not allowed for AlarmThresholdRule query, as the alarm # evaluator will construct timestamp bounds for the sequence of # statistics queries as the sliding evaluation window advances # over time. 
v2_utils.validate_query(threshold_rule.query, storage.SampleFilter.__init__, allow_timestamps=False) return threshold_rule @staticmethod def validate_alarm(alarm): # ensure an implicit constraint on project_id is added to # the query if not already present alarm.threshold_rule.query = v2_utils.sanitize_query( alarm.threshold_rule.query, storage.SampleFilter.__init__, on_behalf_of=alarm.project_id ) @property def default_description(self): return (_('Alarm when %(meter_name)s is %(comparison_operator)s a ' '%(statistic)s of %(threshold)s over %(period)s seconds') % dict(comparison_operator=self.comparison_operator, statistic=self.statistic, threshold=self.threshold, meter_name=self.meter_name, period=self.period)) def as_dict(self): rule = self.as_dict_from_keys(['period', 'comparison_operator', 'threshold', 'statistic', 'evaluation_periods', 'meter_name', 'exclude_outliers']) rule['query'] = [q.as_dict() for q in self.query] return rule @classmethod def sample(cls): return cls(meter_name='cpu_util', period=60, evaluation_periods=1, threshold=300.0, statistic='avg', comparison_operator='gt', query=[{'field': 'resource_id', 'value': '2a4d689b-f0b8-49c1-9eef-87cae58d80db', 'op': 'eq', 'type': 'string'}]) aodh-2.0.6/aodh/api/controllers/v2/alarm_rules/event.py0000664000567000056710000000371213076064371024167 0ustar jenkinsjenkins00000000000000# # Copyright 2015 NEC Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import wsme from wsme import types as wtypes from aodh.api.controllers.v2 import base from aodh.i18n import _ class AlarmEventRule(base.AlarmRule): """Alarm Event Rule. Describe when to trigger the alarm based on an event """ event_type = wsme.wsattr(wtypes.text) "The type of event (default is '*')" query = wsme.wsattr([base.Query]) "The query to find the event (default is [])" def __init__(self, event_type=None, query=None): event_type = event_type or '*' query = [base.Query(**q) for q in query or []] super(AlarmEventRule, self).__init__(event_type=event_type, query=query) @classmethod def validate_alarm(cls, alarm): for i in alarm.event_rule.query: i._get_value_as_type() @property def default_description(self): return _('Alarm when %s event occurred.') % self.event_type def as_dict(self): rule = self.as_dict_from_keys(['event_type']) rule['query'] = [q.as_dict() for q in self.query] return rule @classmethod def sample(cls): return cls(event_type='compute.instance.update', query=[{'field': 'traits.instance_id"', 'value': '153462d0-a9b8-4b5b-8175-9e4b05e9b856', 'op': 'eq', 'type': 'string'}]) aodh-2.0.6/aodh/api/controllers/v2/alarm_rules/composite.py0000664000567000056710000000765413076064371025061 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
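# A small sketch of how AlarmThresholdRule's parameters combine: the
# auto-generated description strings them together, and the evaluator
# effectively looks back over evaluation_periods windows of period seconds.
# Values mirror AlarmThresholdRule.sample() above.
rule = {'meter_name': 'cpu_util', 'statistic': 'avg',
        'comparison_operator': 'gt', 'threshold': 300.0,
        'period': 60, 'evaluation_periods': 1}

print('Alarm when %(meter_name)s is %(comparison_operator)s a '
      '%(statistic)s of %(threshold)s over %(period)s seconds' % rule)
# Alarm when cpu_util is gt a avg of 300.0 over 60 seconds

lookback_seconds = rule['period'] * rule['evaluation_periods']
print(lookback_seconds)  # 60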
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json from stevedore import named from wsme.rest import json as wjson from wsme import types as wtypes from aodh.api.controllers.v2 import base from aodh.i18n import _ class InvalidCompositeRule(base.ClientSideError): def __init__(self, error): err = _('Invalid input composite rule: %s, it should ' 'be a dict with an "and" or "or" as key, and the ' 'value of dict should be a list of basic threshold ' 'rules or sub composite rules, can be nested.') % error super(InvalidCompositeRule, self).__init__(err) class CompositeRule(wtypes.UserType): """Composite alarm rule. A simple dict type to preset composite rule. """ basetype = wtypes.text name = 'composite_rule' threshold_plugins = None def __init__(self): threshold_rules = ('threshold', 'gnocchi_resources_threshold', 'gnocchi_aggregation_by_metrics_threshold', 'gnocchi_aggregation_by_resources_threshold') CompositeRule.threshold_plugins = named.NamedExtensionManager( "aodh.alarm.rule", threshold_rules) super(CompositeRule, self).__init__() @staticmethod def valid_composite_rule(rules): if isinstance(rules, dict) and len(rules) == 1: and_or_key = list(rules)[0] if and_or_key not in ('and', 'or'): raise base.ClientSideError( _('Threshold rules should be combined with "and" or "or"')) if isinstance(rules[and_or_key], list): for sub_rule in rules[and_or_key]: CompositeRule.valid_composite_rule(sub_rule) else: raise InvalidCompositeRule(rules) elif isinstance(rules, dict): rule_type = rules.pop('type', None) if not rule_type: raise base.ClientSideError(_('type must be set in every rule')) if rule_type not in CompositeRule.threshold_plugins: plugins = sorted(CompositeRule.threshold_plugins.names()) err = _('Unsupported sub-rule type :%(rule)s in composite ' 'rule, should be one of: %(plugins)s') % { 'rule': rule_type, 'plugins': plugins} raise base.ClientSideError(err) plugin = CompositeRule.threshold_plugins[rule_type].plugin wjson.fromjson(plugin, rules) rule_dict = plugin(**rules).as_dict() rules.update(rule_dict) rules.update(type=rule_type) else: raise InvalidCompositeRule(rules) @staticmethod def validate(value): try: json.dumps(value) except TypeError: raise base.ClientSideError(_('%s is not JSON serializable') % value) else: CompositeRule.valid_composite_rule(value) return value @staticmethod def frombasetype(value): return CompositeRule.validate(value) @staticmethod def create_hook(alarm): pass @staticmethod def validate_alarm(alarm): pass @staticmethod def update_hook(alarm): pass @staticmethod def as_dict(): pass @staticmethod def __call__(**rule): return rule composite_rule = CompositeRule() aodh-2.0.6/aodh/api/controllers/v2/alarm_rules/combination.py0000664000567000056710000000535213076064372025353 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
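# An example composite rule document of the shape accepted by
# CompositeRule.valid_composite_rule() above, plus a trimmed-down structural
# check.  The metric/resource values are illustrative placeholders, and the
# real validation additionally loads each sub-rule's plugin via stevedore.
def check_structure(rules):
    if isinstance(rules, dict) and len(rules) == 1 and \
            list(rules)[0] in ('and', 'or'):
        sub_rules = rules[list(rules)[0]]
        if not isinstance(sub_rules, list):
            raise ValueError('"and"/"or" must map to a list of rules')
        for sub in sub_rules:
            check_structure(sub)
    elif isinstance(rules, dict):
        if 'type' not in rules:
            raise ValueError('type must be set in every rule')
    else:
        raise ValueError('invalid composite rule: %r' % (rules,))


composite = {'or': [
    {'type': 'gnocchi_resources_threshold',
     'metric': 'cpu_util',
     'resource_id': '6868da77-fa82-4e67-aba9-270c5ae8cbca',
     'resource_type': 'instance',
     'aggregation_method': 'mean',
     'comparison_operator': 'gt',
     'threshold': 80.0},
    {'and': [{'type': 'threshold', 'meter_name': 'memory.usage',
              'threshold': 512.0},
             {'type': 'threshold', 'meter_name': 'disk.iops',
              'threshold': 1000.0}]}]}
check_structure(composite)
print('composite rule structure looks valid')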
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pecan import wsme from wsme import types as wtypes from aodh.api.controllers.v2 import base from aodh.api.controllers.v2 import utils as v2_utils from aodh.i18n import _ class AlarmCombinationRule(base.AlarmRule): """Alarm Combination Rule Describe when to trigger the alarm based on combining the state of other alarms. """ operator = base.AdvEnum('operator', str, 'or', 'and', default='and') "How to combine the sub-alarms" alarm_ids = wsme.wsattr([wtypes.text], mandatory=True) "List of alarm identifiers to combine" @property def default_description(self): joiner = ' %s ' % self.operator return _('Combined state of alarms %s') % joiner.join(self.alarm_ids) def as_dict(self): return self.as_dict_from_keys(['operator', 'alarm_ids']) @staticmethod def validate(rule): rule.alarm_ids = sorted(set(rule.alarm_ids), key=rule.alarm_ids.index) if len(rule.alarm_ids) <= 1: raise base.ClientSideError(_('Alarm combination rule should ' 'contain at least two different ' 'alarm ids.')) return rule @staticmethod def validate_alarm(alarm): project = v2_utils.get_auth_project( alarm.project_id if alarm.project_id != wtypes.Unset else None) for id in alarm.combination_rule.alarm_ids: alarms = list(pecan.request.alarm_storage_conn.get_alarms( alarm_id=id, project=project)) if not alarms: raise base.AlarmNotFound(id, project) @staticmethod def update_hook(alarm): # should check if there is any circle in the dependency, but for # efficiency reason, here only check alarm cannot depend on itself if alarm.alarm_id in alarm.combination_rule.alarm_ids: raise base.ClientSideError( _('Cannot specify alarm %s itself in combination rule') % alarm.alarm_id) @classmethod def sample(cls): return cls(operator='or', alarm_ids=['739e99cb-c2ec-4718-b900-332502355f38', '153462d0-a9b8-4b5b-8175-9e4b05e9b856']) aodh-2.0.6/aodh/api/controllers/v2/alarm_rules/__init__.py0000664000567000056710000000000013076064371024570 0ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/api/controllers/v2/alarm_rules/gnocchi.py0000664000567000056710000001622113076064372024460 0ustar jenkinsjenkins00000000000000# # Copyright 2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
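# A standalone sketch of the normalisation done by
# AlarmCombinationRule.validate() and update_hook() above: alarm_ids are
# de-duplicated while preserving order, at least two distinct ids are
# required, and an alarm may not reference itself.
def normalize_alarm_ids(alarm_ids, own_alarm_id=None):
    deduped = sorted(set(alarm_ids), key=alarm_ids.index)
    if len(deduped) <= 1:
        raise ValueError('combination rule needs at least two different '
                         'alarm ids')
    if own_alarm_id is not None and own_alarm_id in deduped:
        raise ValueError('an alarm cannot depend on itself')
    return deduped


ids = ['739e99cb-c2ec-4718-b900-332502355f38',
       '153462d0-a9b8-4b5b-8175-9e4b05e9b856',
       '739e99cb-c2ec-4718-b900-332502355f38']      # duplicate on purpose
print(normalize_alarm_ids(ids))
# ['739e99cb-c2ec-4718-b900-332502355f38',
#  '153462d0-a9b8-4b5b-8175-9e4b05e9b856']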
from gnocchiclient import client from gnocchiclient import exceptions from oslo_serialization import jsonutils import pecan import wsme from wsme import types as wtypes from aodh.api.controllers.v2 import base from aodh.api.controllers.v2 import utils as v2_utils from aodh import keystone_client class GnocchiUnavailable(Exception): code = 503 class AlarmGnocchiThresholdRule(base.AlarmRule): comparison_operator = base.AdvEnum('comparison_operator', str, 'lt', 'le', 'eq', 'ne', 'ge', 'gt', default='eq') "The comparison against the alarm threshold" threshold = wsme.wsattr(float, mandatory=True) "The threshold of the alarm" aggregation_method = wsme.wsattr(wtypes.text, mandatory=True) "The aggregation_method to compare to the threshold" evaluation_periods = wsme.wsattr(wtypes.IntegerType(minimum=1), default=1) "The number of historical periods to evaluate the threshold" granularity = wsme.wsattr(wtypes.IntegerType(minimum=1), default=60) "The time range in seconds over which query" @classmethod def validate_alarm(cls, alarm): alarm_rule = getattr(alarm, "%s_rule" % alarm.type) aggregation_method = alarm_rule.aggregation_method if aggregation_method not in cls._get_aggregation_methods(): raise base.ClientSideError( 'aggregation_method should be in %s not %s' % ( cls._get_aggregation_methods(), aggregation_method)) # NOTE(sileht): once cachetools is in the requirements # enable it # @cachetools.ttl_cache(maxsize=1, ttl=600) @staticmethod def _get_aggregation_methods(): conf = pecan.request.cfg gnocchi_client = client.Client( '1', keystone_client.get_session(conf), interface=conf.service_credentials.interface, region_name=conf.service_credentials.region_name, endpoint_override=conf.gnocchi_url) try: return gnocchi_client.capabilities.list().get( 'aggregation_methods', []) except exceptions.ClientException as e: raise base.ClientSideError(e.message, status_code=e.code) except Exception as e: raise GnocchiUnavailable(e) class MetricOfResourceRule(AlarmGnocchiThresholdRule): metric = wsme.wsattr(wtypes.text, mandatory=True) "The name of the metric" resource_id = wsme.wsattr(wtypes.text, mandatory=True) "The id of a resource" resource_type = wsme.wsattr(wtypes.text, mandatory=True) "The resource type" def as_dict(self): rule = self.as_dict_from_keys(['granularity', 'comparison_operator', 'threshold', 'aggregation_method', 'evaluation_periods', 'metric', 'resource_id', 'resource_type']) return rule @classmethod def validate_alarm(cls, alarm): super(MetricOfResourceRule, cls).validate_alarm(alarm) conf = pecan.request.cfg gnocchi_client = client.Client( '1', keystone_client.get_session(conf), interface=conf.service_credentials.interface, region_name=conf.service_credentials.region_name, endpoint_override=conf.gnocchi_url) rule = alarm.gnocchi_resources_threshold_rule try: gnocchi_client.resource.get(rule.resource_type, rule.resource_id) except exceptions.ClientException as e: raise base.ClientSideError(e.message, status_code=e.code) except Exception as e: raise GnocchiUnavailable(e) class AggregationMetricByResourcesLookupRule(AlarmGnocchiThresholdRule): metric = wsme.wsattr(wtypes.text, mandatory=True) "The name of the metric" query = wsme.wsattr(wtypes.text, mandatory=True) ('The query to filter the metric, Don\'t forget to filter out ' 'deleted resources (example: {"and": [{"=": {"ended_at": null}}, ...]}), ' 'Otherwise Gnocchi will try to create the aggregate against obsolete ' 'resources') resource_type = wsme.wsattr(wtypes.text, mandatory=True) "The resource type" def as_dict(self): rule = 
self.as_dict_from_keys(['granularity', 'comparison_operator', 'threshold', 'aggregation_method', 'evaluation_periods', 'metric', 'query', 'resource_type']) return rule @classmethod def validate_alarm(cls, alarm): super(AggregationMetricByResourcesLookupRule, cls).validate_alarm(alarm) rule = alarm.gnocchi_aggregation_by_resources_threshold_rule # check the query string is a valid json try: query = jsonutils.loads(rule.query) except ValueError: raise wsme.exc.InvalidInput('rule/query', rule.query) # Scope the alarm to the project id if needed auth_project = v2_utils.get_auth_project(alarm.project_id) if auth_project: query = {"and": [{"=": {"created_by_project_id": auth_project}}, query]} rule.query = jsonutils.dumps(query) conf = pecan.request.cfg gnocchi_client = client.Client( '1', keystone_client.get_session(conf), interface=conf.service_credentials.interface, region_name=conf.service_credentials.region_name, endpoint_override=conf.gnocchi_url) try: gnocchi_client.metric.aggregation( metrics=rule.metric, query=query, aggregation=rule.aggregation_method, needed_overlap=0, resource_type=rule.resource_type) except exceptions.ClientException as e: raise base.ClientSideError(e.message, status_code=e.code) except Exception as e: raise GnocchiUnavailable(e) class AggregationMetricsByIdLookupRule(AlarmGnocchiThresholdRule): metrics = wsme.wsattr([wtypes.text], mandatory=True) "A list of metric Ids" def as_dict(self): rule = self.as_dict_from_keys(['granularity', 'comparison_operator', 'threshold', 'aggregation_method', 'evaluation_periods', 'metrics']) return rule aodh-2.0.6/aodh/api/controllers/v2/__init__.py0000664000567000056710000000000013076064371022262 0ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/api/controllers/v2/base.py0000664000567000056710000002065413076064372021457 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 IBM Corp. # Copyright 2013 eNovance # Copyright Ericsson AB 2013. All rights reserved # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
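To make the gnocchi rule attributes above concrete, here is a sketch of a gnocchi_aggregation_by_resources_threshold rule body. The metric, threshold and the flavor filter are hypothetical; as the query attribute's description notes, the query should exclude deleted resources:

    # Illustrative gnocchi_aggregation_by_resources_threshold rule body.
    # The query is a JSON string; filtering on ended_at == null keeps
    # Gnocchi from aggregating over resources that have been deleted.
    rule = {
        "metric": "cpu_util",
        "resource_type": "instance",
        "query": '{"and": [{"=": {"ended_at": null}},'
                 ' {"=": {"flavor_id": "2"}}]}',
        "aggregation_method": "mean",
        "comparison_operator": "gt",
        "threshold": 80.0,
        "evaluation_periods": 3,
        "granularity": 60,
    }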
import ast import datetime import functools import inspect from oslo_utils import strutils from oslo_utils import timeutils import pecan import six import wsme from wsme import types as wtypes from aodh.i18n import _ operation_kind = ('lt', 'le', 'eq', 'ne', 'ge', 'gt') operation_kind_enum = wtypes.Enum(str, *operation_kind) class ClientSideError(wsme.exc.ClientSideError): def __init__(self, error, status_code=400): pecan.response.translatable_error = error super(ClientSideError, self).__init__(error, status_code) class EntityNotFound(ClientSideError): def __init__(self, entity, id): super(EntityNotFound, self).__init__( _("%(entity)s %(id)s Not Found") % {'entity': entity, 'id': id}, status_code=404) class ProjectNotAuthorized(ClientSideError): def __init__(self, id, aspect='project'): params = dict(aspect=aspect, id=id) super(ProjectNotAuthorized, self).__init__( _("Not Authorized to access %(aspect)s %(id)s") % params, status_code=401) class AdvEnum(wtypes.wsproperty): """Handle default and mandatory for wtypes.Enum.""" def __init__(self, name, *args, **kwargs): self._name = '_advenum_%s' % name self._default = kwargs.pop('default', None) mandatory = kwargs.pop('mandatory', False) enum = wtypes.Enum(*args, **kwargs) super(AdvEnum, self).__init__(datatype=enum, fget=self._get, fset=self._set, mandatory=mandatory) def _get(self, parent): if hasattr(parent, self._name): value = getattr(parent, self._name) return value or self._default return self._default def _set(self, parent, value): try: if self.datatype.validate(value): setattr(parent, self._name, value) except ValueError as e: raise wsme.exc.InvalidInput(self._name.replace('_advenum_', '', 1), value, e) class Base(wtypes.DynamicBase): @classmethod def from_db_model(cls, m): return cls(**(m.as_dict())) @classmethod def from_db_and_links(cls, m, links): return cls(links=links, **(m.as_dict())) def as_dict(self, db_model): valid_keys = inspect.getargspec(db_model.__init__)[0] if 'self' in valid_keys: valid_keys.remove('self') return self.as_dict_from_keys(valid_keys) def as_dict_from_keys(self, keys): return dict((k, getattr(self, k)) for k in keys if hasattr(self, k) and getattr(self, k) != wsme.Unset) class Query(Base): """Query filter.""" # The data types supported by the query. _supported_types = ['integer', 'float', 'string', 'boolean', 'datetime'] # Functions to convert the data field to the correct type. _type_converters = {'integer': int, 'float': float, 'boolean': functools.partial( strutils.bool_from_string, strict=True), 'string': six.text_type, 'datetime': timeutils.parse_isotime} _op = None # provide a default def get_op(self): return self._op or 'eq' def set_op(self, value): self._op = value field = wsme.wsattr(wtypes.text, mandatory=True) "The name of the field to test" # op = wsme.wsattr(operation_kind, default='eq') # this ^ doesn't seem to work. op = wsme.wsproperty(operation_kind_enum, get_op, set_op) "The comparison operator. Defaults to 'eq'." 
value = wsme.wsattr(wtypes.text, mandatory=True) "The value to compare against the stored data" type = wtypes.text "The data type of value to compare against the stored data" def __repr__(self): # for logging calls return '' % (self.field, self.op, self.value, self.type) @classmethod def sample(cls): return cls(field='resource_id', op='eq', value='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36', type='string' ) def as_dict(self): return self.as_dict_from_keys(['field', 'op', 'type', 'value']) def _get_value_as_type(self, forced_type=None): """Convert metadata value to the specified data type. This method is called during metadata query to help convert the querying metadata to the data type specified by user. If there is no data type given, the metadata will be parsed by ast.literal_eval to try to do a smart converting. NOTE (flwang) Using "_" as prefix to avoid an InvocationError raised from wsmeext/sphinxext.py. It's OK to call it outside the Query class. Because the "public" side of that class is actually the outside of the API, and the "private" side is the API implementation. The method is only used in the API implementation, so it's OK. :returns: metadata value converted with the specified data type. """ type = forced_type or self.type try: converted_value = self.value if not type: try: converted_value = ast.literal_eval(self.value) except (ValueError, SyntaxError): # Unable to convert the metadata value automatically # let it default to self.value pass else: if type not in self._supported_types: # Types must be explicitly declared so the # correct type converter may be used. Subclasses # of Query may define _supported_types and # _type_converters to define their own types. raise TypeError() converted_value = self._type_converters[type](self.value) if isinstance(converted_value, datetime.datetime): converted_value = timeutils.normalize_time(converted_value) except ValueError: msg = (_('Unable to convert the value %(value)s' ' to the expected data type %(type)s.') % {'value': self.value, 'type': type}) raise ClientSideError(msg) except TypeError: msg = (_('The data type %(type)s is not supported. The supported' ' data type list is: %(supported)s') % {'type': type, 'supported': self._supported_types}) raise ClientSideError(msg) except Exception: msg = (_('Unexpected exception converting %(value)s to' ' the expected data type %(type)s.') % {'value': self.value, 'type': type}) raise ClientSideError(msg) return converted_value class AlarmNotFound(ClientSideError): def __init__(self, alarm, auth_project): if not auth_project: msg = _('Alarm %s not found') % alarm else: msg = _('Alarm %(alarm_id)s not found in project %' '(project)s') % { 'alarm_id': alarm, 'project': auth_project} super(AlarmNotFound, self).__init__(msg, status_code=404) class AlarmRule(Base): """Base class Alarm Rule extension and wsme.types.""" @staticmethod def validate_alarm(alarm): pass @staticmethod def create_hook(alarm): pass @staticmethod def update_hook(alarm): pass aodh-2.0.6/aodh/api/middleware.py0000664000567000056710000001230213076064372017754 0ustar jenkinsjenkins00000000000000# # Copyright 2013 IBM Corp. # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Middleware to replace the plain text message body of an error response with one formatted so the client can parse it. Based on pecan.middleware.errordocument """ import json from lxml import etree from oslo_log import log import six import webob from aodh import i18n from aodh.i18n import _ LOG = log.getLogger(__name__) class ParsableErrorMiddleware(object): """Replace error body with something the client can parse.""" @staticmethod def best_match_language(accept_language): """Determines best available locale from the Accept-Language header. :returns: the best language match or None if the 'Accept-Language' header was not available in the request. """ if not accept_language: return None all_languages = i18n.get_available_languages() return accept_language.best_match(all_languages) def __init__(self, app): self.app = app def __call__(self, environ, start_response): # Request for this state, modified by replace_start_response() # and used when an error is being reported. state = {} def replacement_start_response(status, headers, exc_info=None): """Overrides the default response to make errors parsable.""" try: status_code = int(status.split(' ')[0]) state['status_code'] = status_code except (ValueError, TypeError): # pragma: nocover raise Exception(( 'ErrorDocumentMiddleware received an invalid ' 'status %s' % status )) else: if (state['status_code'] // 100) not in (2, 3): # Remove some headers so we can replace them later # when we have the full error message and can # compute the length. headers = [(h, v) for (h, v) in headers if h not in ('Content-Length', 'Content-Type') ] # Save the headers in case we need to modify them. 
state['headers'] = headers return start_response(status, headers, exc_info) app_iter = self.app(environ, replacement_start_response) if (state['status_code'] // 100) not in (2, 3): req = webob.Request(environ) error = environ.get('translatable_error') user_locale = self.best_match_language(req.accept_language) if (req.accept.best_match(['application/json', 'application/xml']) == 'application/xml'): content_type = 'application/xml' try: # simple check xml is valid fault = etree.fromstring(b'\n'.join(app_iter)) # Add the translated error to the xml data if error is not None: for fault_string in fault.findall('faultstring'): fault_string.text = i18n.translate(error, user_locale) error_message = etree.tostring(fault) body = b''.join((b'', error_message, b'')) except etree.XMLSyntaxError as err: LOG.error(_('Error parsing HTTP response: %s'), err) error_message = state['status_code'] body = '%s' % error_message if six.PY3: body = body.encode('utf-8') else: content_type = 'application/json' app_data = b'\n'.join(app_iter) if six.PY3: app_data = app_data.decode('utf-8') try: fault = json.loads(app_data) if error is not None and 'faultstring' in fault: fault['faultstring'] = i18n.translate(error, user_locale) except ValueError as err: fault = app_data body = json.dumps({'error_message': fault}) if six.PY3: body = body.encode('utf-8') state['headers'].append(('Content-Length', str(len(body)))) state['headers'].append(('Content-Type', content_type)) body = [body] else: body = app_iter return body aodh-2.0.6/aodh/api/app.wsgi0000664000567000056710000000170313076064372016743 0ustar jenkinsjenkins00000000000000# -*- mode: python -*- # # Copyright 2013 New Dream Network, LLC (DreamHost) # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Use this file for deploying the API under mod_wsgi. See http://pecan.readthedocs.org/en/latest/deployment.html for details. """ from aodh.api import app from aodh import service # Initialize the oslo configuration library and logging conf = service.prepare_service([]) application = app.load_app(conf) aodh-2.0.6/aodh/api/__init__.py0000664000567000056710000000173513076064372017406 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
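For JSON clients, the middleware above re-serializes any non-2xx/3xx body under a single error_message key. Assuming a typical WSME fault body (the inner faultcode/faultstring/debuginfo keys come from WSME, not from this module), a wrapped 404 looks roughly like:

    # Hypothetical shape of an error body after ParsableErrorMiddleware:
    # whatever JSON the application emitted is nested under "error_message",
    # with faultstring already translated when a locale was negotiated.
    wrapped_body = {
        "error_message": {
            "debuginfo": None,
            "faultcode": "Client",
            "faultstring": "Alarm 739e99cb-c2ec-4718-b900-332502355f38 not found",
        }
    }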
from oslo_config import cfg # Register options for the service OPTS = [ cfg.PortOpt('port', default=8042, deprecated_group='DEFAULT', help='The port for the aodh API server.', ), cfg.StrOpt('host', default='0.0.0.0', help='The listen IP for the aodh API server.', ), ] aodh-2.0.6/aodh/api/rbac.py0000664000567000056710000000713013076064372016551 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2014 Hewlett-Packard Company # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Access Control Lists (ACL's) control access the API server.""" import pecan def target_from_segregation_rule(headers, enforcer): """Return a target that corresponds of an alarm returned by segregation rule This allows to use project_id: in an oslo_policy rule for query/listing. :param headers: HTTP headers dictionary :param enforcer: policy enforcer :returns: target """ project_id = get_limited_to_project(headers, enforcer) if project_id is not None: return {'project_id': project_id} return {} def enforce(policy_name, headers, enforcer, target): """Return the user and project the request should be limited to. :param policy_name: the policy name to validate authz against. :param headers: HTTP headers dictionary :param enforcer: policy enforcer :param target: the alarm or "auto" to """ rule_method = "telemetry:" + policy_name credentials = { 'roles': headers.get('X-Roles', "").split(","), 'user_id': headers.get('X-User-Id'), 'project_id': headers.get('X-Project-Id'), } # TODO(sileht): add deprecation warning to be able to remove this: # maintain backward compat with Juno and previous by allowing the action if # there is no rule defined for it rules = enforcer.rules.keys() if rule_method not in rules: return if not enforcer.enforce(rule_method, target, credentials): pecan.core.abort(status_code=403, detail='RBAC Authorization Failed') # TODO(fabiog): these methods are still used because the scoping part is really # convoluted and difficult to separate out. def get_limited_to(headers, enforcer): """Return the user and project the request should be limited to. :param headers: HTTP headers dictionary :param enforcer: policy enforcer :return: A tuple of (user, project), set to None if there's no limit on one of these. """ # TODO(sileht): Only filtering on role work currently for segregation # oslo.policy expects the target to be the alarm. That will allow # to create more enhanced rbac. But for now we enforce the # scoping of request to the project-id, so... 
target = {} credentials = { 'roles': headers.get('X-Roles', "").split(","), } # maintain backward compat with Juno and previous by using context_is_admin # rule if the segregation rule (added in Kilo) is not defined rules = enforcer.rules.keys() rule_name = 'segregation' if 'segregation' in rules else 'context_is_admin' if not enforcer.enforce(rule_name, target, credentials): return headers.get('X-User-Id'), headers.get('X-Project-Id') return None, None def get_limited_to_project(headers, enforcer): """Return the project the request should be limited to. :param headers: HTTP headers dictionary :return: A project, or None if there's no limit on it. """ return get_limited_to(headers, enforcer)[1] aodh-2.0.6/aodh/api/app.py0000664000567000056710000000653413076064372016431 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2015 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import os from oslo_config import cfg from oslo_log import log from paste import deploy import pecan from werkzeug import serving from aodh.api import hooks from aodh.api import middleware from aodh.i18n import _ from aodh.i18n import _LW from aodh import service from aodh import storage LOG = log.getLogger(__name__) PECAN_CONFIG = { 'app': { 'root': 'aodh.api.controllers.root.RootController', 'modules': ['aodh.api'], }, } def setup_app(pecan_config=PECAN_CONFIG, conf=None): if conf is None: # NOTE(jd) That sucks but pecan forces us to use kwargs :( raise RuntimeError("Config is actually mandatory") # FIXME: Replace DBHook with a hooks.TransactionHook app_hooks = [hooks.ConfigHook(conf), hooks.DBHook( storage.get_connection_from_config(conf)), hooks.TranslationHook()] pecan.configuration.set_config(dict(pecan_config), overwrite=True) # NOTE(sileht): pecan debug won't work in multi-process environment pecan_debug = conf.api.pecan_debug if conf.api.workers != 1 and pecan_debug: pecan_debug = False LOG.warning(_LW('pecan_debug cannot be enabled, if workers is > 1, ' 'the value is overrided with False')) app = pecan.make_app( pecan_config['app']['root'], debug=pecan_debug, hooks=app_hooks, wrap_app=middleware.ParsableErrorMiddleware, guess_content_type_from_ext=False ) return app def load_app(conf): # Build the WSGI app cfg_file = None cfg_path = conf.api.paste_config if not os.path.isabs(cfg_path): cfg_file = conf.find_file(cfg_path) elif os.path.exists(cfg_path): cfg_file = cfg_path if not cfg_file: raise cfg.ConfigFilesNotFoundError([conf.api.paste_config]) LOG.info("Full WSGI config used: %s" % cfg_file) return deploy.loadapp("config:" + cfg_file) def build_server(conf): app = load_app(conf) # Create the WSGI server and start it host, port = conf.api.host, conf.api.port LOG.info(_('Starting server in PID %s') % os.getpid()) LOG.info(_("Configuration:")) conf.log_opt_values(LOG, logging.INFO) if host == '0.0.0.0': LOG.info(_( 'serving on 0.0.0.0:%(sport)s, view at http://127.0.0.1:%(vport)s') % ({'sport': port, 'vport': port})) else: LOG.info(_("serving on 
http://%(host)s:%(port)s") % ( {'host': host, 'port': port})) serving.run_simple(host, port, app, processes=conf.api.workers) def _app(): conf = service.prepare_service() return setup_app(conf=conf) def app_factory(global_config, **local_conf): return _app() aodh-2.0.6/aodh/api/hooks.py0000664000567000056710000000335213076064372016767 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from pecan import hooks class ConfigHook(hooks.PecanHook): """Attach the configuration and policy enforcer object to the request. That allows controllers to get it. """ def __init__(self, conf): self.conf = conf self.enforcer = policy.Enforcer(conf, default_rule="default") def before(self, state): state.request.cfg = self.conf state.request.enforcer = self.enforcer class DBHook(hooks.PecanHook): def __init__(self, alarm_conn): self.alarm_storage_connection = alarm_conn def before(self, state): state.request.alarm_storage_conn = self.alarm_storage_connection class TranslationHook(hooks.PecanHook): def after(self, state): # After a request has been done, we need to see if # ClientSideError has added an error onto the response. # If it has we need to get it info the thread-safe WSGI # environ to be used by the ParsableErrorMiddleware. if hasattr(state.response, 'translatable_error'): state.request.environ['translatable_error'] = ( state.response.translatable_error) aodh-2.0.6/aodh/__init__.py0000664000567000056710000000154113076064372016630 0ustar jenkinsjenkins00000000000000# Copyright 2014 eNovance # # Authors: Julien Danjou # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class NotImplementedError(NotImplementedError): # FIXME(jd) This is used by WSME to return a correct HTTP code. We should # not expose it here but wrap our methods in the API to convert it to a # proper HTTP error. code = 501 aodh-2.0.6/aodh/storage/0000775000567000056710000000000013076064720016157 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/storage/mongo/0000775000567000056710000000000013076064720017276 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/storage/mongo/utils.py0000664000567000056710000002363013076064372021017 0ustar jenkinsjenkins00000000000000# # Copyright Ericsson AB 2013. All rights reserved # Copyright 2015 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Common functions for MongoDB backend """ import weakref from oslo_log import log from oslo_utils import netutils import pymongo import retrying from aodh.i18n import _ LOG = log.getLogger(__name__) def make_timestamp_range(start, end, start_timestamp_op=None, end_timestamp_op=None): """Create the query document to find timestamps within that range. This is done by given two possible datetimes and their operations. By default, using $gte for the lower bound and $lt for the upper bound. """ ts_range = {} if start: if start_timestamp_op == 'gt': start_timestamp_op = '$gt' else: start_timestamp_op = '$gte' ts_range[start_timestamp_op] = start if end: if end_timestamp_op == 'le': end_timestamp_op = '$lte' else: end_timestamp_op = '$lt' ts_range[end_timestamp_op] = end return ts_range class ConnectionPool(object): def __init__(self): self._pool = {} def connect(self, url, max_retries, retry_interval): connection_options = pymongo.uri_parser.parse_uri(url) del connection_options['database'] del connection_options['username'] del connection_options['password'] del connection_options['collection'] pool_key = tuple(connection_options) if pool_key in self._pool: client = self._pool.get(pool_key)() if client: return client splitted_url = netutils.urlsplit(url) log_data = {'db': splitted_url.scheme, 'nodelist': connection_options['nodelist']} LOG.info(_('Connecting to %(db)s on %(nodelist)s') % log_data) try: client = MongoProxy( pymongo.MongoClient(url), max_retries, retry_interval, ) except pymongo.errors.ConnectionFailure as e: LOG.warning(_('Unable to connect to the database server: ' '%(errmsg)s.') % {'errmsg': e}) raise self._pool[pool_key] = weakref.ref(client) return client class QueryTransformer(object): operators = {"<": "$lt", ">": "$gt", "<=": "$lte", "=<": "$lte", ">=": "$gte", "=>": "$gte", "!=": "$ne", "in": "$in", "=~": "$regex"} complex_operators = {"or": "$or", "and": "$and"} ordering_functions = {"asc": pymongo.ASCENDING, "desc": pymongo.DESCENDING} def transform_orderby(self, orderby): orderby_filter = [] for field in orderby: field_name = list(field.keys())[0] ordering = self.ordering_functions[list(field.values())[0]] orderby_filter.append((field_name, ordering)) return orderby_filter @staticmethod def _move_negation_to_leaf(condition): """Moves every not operator to the leafs. Moving is going by applying the De Morgan rules and annihilating double negations. 
""" def _apply_de_morgan(tree, negated_subtree, negated_op): if negated_op == "and": new_op = "or" else: new_op = "and" tree[new_op] = [{"not": child} for child in negated_subtree[negated_op]] del tree["not"] def transform(subtree): op = list(subtree.keys())[0] if op in ["and", "or"]: [transform(child) for child in subtree[op]] elif op == "not": negated_tree = subtree[op] negated_op = list(negated_tree.keys())[0] if negated_op == "and": _apply_de_morgan(subtree, negated_tree, negated_op) transform(subtree) elif negated_op == "or": _apply_de_morgan(subtree, negated_tree, negated_op) transform(subtree) elif negated_op == "not": # two consecutive not annihilates themselves value = list(negated_tree.values())[0] new_op = list(value.keys())[0] subtree[new_op] = negated_tree[negated_op][new_op] del subtree["not"] transform(subtree) transform(condition) def transform_filter(self, condition): # in Mongo not operator can only be applied to # simple expressions so we have to move every # not operator to the leafs of the expression tree self._move_negation_to_leaf(condition) return self._process_json_tree(condition) def _handle_complex_op(self, complex_op, nodes): element_list = [] for node in nodes: element = self._process_json_tree(node) element_list.append(element) complex_operator = self.complex_operators[complex_op] op = {complex_operator: element_list} return op def _handle_not_op(self, negated_tree): # assumes that not is moved to the leaf already # so we are next to a leaf negated_op = list(negated_tree.keys())[0] negated_field = list(negated_tree[negated_op].keys())[0] value = negated_tree[negated_op][negated_field] if negated_op == "=": return {negated_field: {"$ne": value}} elif negated_op == "!=": return {negated_field: value} else: return {negated_field: {"$not": {self.operators[negated_op]: value}}} def _handle_simple_op(self, simple_op, nodes): field_name = list(nodes.keys())[0] field_value = list(nodes.values())[0] # no operator for equal in Mongo if simple_op == "=": op = {field_name: field_value} return op operator = self.operators[simple_op] op = {field_name: {operator: field_value}} return op def _process_json_tree(self, condition_tree): operator_node = list(condition_tree.keys())[0] nodes = list(condition_tree.values())[0] if operator_node in self.complex_operators: return self._handle_complex_op(operator_node, nodes) if operator_node == "not": negated_tree = condition_tree[operator_node] return self._handle_not_op(negated_tree) return self._handle_simple_op(operator_node, nodes) MONGO_METHODS = set([typ for typ in dir(pymongo.collection.Collection) if not typ.startswith('_')]) MONGO_METHODS.update(set([typ for typ in dir(pymongo.MongoClient) if not typ.startswith('_')])) MONGO_METHODS.update(set([typ for typ in dir(pymongo) if not typ.startswith('_')])) def _safe_mongo_call(max_retries, retry_interval): return retrying.retry( retry_on_exception=lambda e: isinstance( e, pymongo.errors.AutoReconnect), wait_fixed=retry_interval * 1000, stop_max_attempt_number=max_retries if max_retries >= 0 else None ) class MongoProxy(object): def __init__(self, conn, max_retries, retry_interval): self.conn = conn self.max_retries = max_retries self.retry_interval = retry_interval def __getitem__(self, item): """Create and return proxy around the method in the connection. 
:param item: name of the connection """ return MongoProxy(self.conn[item]) def find(self, *args, **kwargs): # We need this modifying method to return a CursorProxy object so that # we can handle the Cursor next function to catch the AutoReconnect # exception. return CursorProxy(self.conn.find(*args, **kwargs), self.max_retries, self.retry_interval) def __getattr__(self, item): """Wrap MongoDB connection. If item is the name of an executable method, for example find or insert, wrap this method to retry. Else wrap getting attribute with MongoProxy. """ if item in ('name', 'database'): return getattr(self.conn, item) if item in MONGO_METHODS: return _safe_mongo_call( self.max_retries, self.retry_interval, )(getattr(self.conn, item)) return MongoProxy(getattr(self.conn, item), self.max_retries, self.retry_interval) def __call__(self, *args, **kwargs): return self.conn(*args, **kwargs) class CursorProxy(pymongo.cursor.Cursor): def __init__(self, cursor, max_retries, retry_interval): self.cursor = cursor self.next = _safe_mongo_call( max_retries, retry_interval)(self._next) def __getitem__(self, item): return self.cursor[item] def _next(self): """Wrap Cursor next method. This method will be executed before each Cursor next method call. """ try: save_cursor = self.cursor.clone() return self.cursor.next() except pymongo.errors.AutoReconnect: self.cursor = save_cursor raise def __getattr__(self, item): return getattr(self.cursor, item) aodh-2.0.6/aodh/storage/mongo/__init__.py0000664000567000056710000000000013076064372021400 0ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/storage/impl_sqlalchemy.py0000664000567000056710000003540413076064372021725 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """SQLAlchemy storage backend.""" from __future__ import absolute_import import datetime import os.path from alembic import command from alembic import config from alembic import migration from oslo_db import exception from oslo_db.sqlalchemy import session as db_session from oslo_log import log from oslo_utils import timeutils import six from sqlalchemy import desc from aodh.i18n import _LI from aodh import storage from aodh.storage import base from aodh.storage import models as alarm_api_models from aodh.storage.sqlalchemy import models from aodh.storage.sqlalchemy import utils as sql_utils LOG = log.getLogger(__name__) AVAILABLE_CAPABILITIES = { 'alarms': {'query': {'simple': True, 'complex': True}, 'history': {'query': {'simple': True, 'complex': True}}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } class Connection(base.Connection): """Put the data into a SQLAlchemy database. 
""" CAPABILITIES = base.update_nested(base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = base.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) def __init__(self, conf, url): # Set max_retries to 0, since oslo.db in certain cases may attempt # to retry making the db connection retried max_retries ^ 2 times # in failure case and db reconnection has already been implemented # in storage.__init__.get_connection_from_config function options = dict(conf.database.items()) options['max_retries'] = 0 # oslo.db doesn't support options defined by Aodh for opt in storage.OPTS: options.pop(opt.name, None) self._engine_facade = db_session.EngineFacade(url, **options) self.conf = conf def disconnect(self): self._engine_facade.get_engine().dispose() def _get_alembic_config(self): cfg = config.Config( "%s/sqlalchemy/alembic/alembic.ini" % os.path.dirname(__file__)) cfg.set_main_option('sqlalchemy.url', self.conf.database.connection) return cfg def upgrade(self, nocreate=False): cfg = self._get_alembic_config() cfg.conf = self.conf if nocreate: command.upgrade(cfg, "head") else: engine = self._engine_facade.get_engine() ctxt = migration.MigrationContext.configure(engine.connect()) current_version = ctxt.get_current_revision() if current_version is None: try: models.Base.metadata.create_all(engine, checkfirst=False) except exception.DBError: # Assume tables exist from Ceilometer, take control # FIXME(jd) Remove in Ocata command.stamp(cfg, "12fe8fac9fe4") command.upgrade(cfg, "head") else: command.stamp(cfg, "head") else: command.upgrade(cfg, "head") def clear(self): engine = self._engine_facade.get_engine() for table in reversed(models.Base.metadata.sorted_tables): engine.execute(table.delete()) engine.dispose() def _retrieve_data(self, filter_expr, orderby, limit, table): if limit == 0: return [] session = self._engine_facade.get_session() engine = self._engine_facade.get_engine() query = session.query(table) transformer = sql_utils.QueryTransformer(table, query, dialect=engine.dialect.name) if filter_expr is not None: transformer.apply_filter(filter_expr) transformer.apply_options(orderby, limit) retrieve = {models.Alarm: self._retrieve_alarms, models.AlarmChange: self._retrieve_alarm_history} return retrieve[table](transformer.get_query()) @staticmethod def _row_to_alarm_model(row): return alarm_api_models.Alarm(alarm_id=row.alarm_id, enabled=row.enabled, type=row.type, name=row.name, description=row.description, timestamp=row.timestamp, user_id=row.user_id, project_id=row.project_id, state=row.state, state_timestamp=row.state_timestamp, ok_actions=row.ok_actions, alarm_actions=row.alarm_actions, insufficient_data_actions=( row.insufficient_data_actions), rule=row.rule, time_constraints=row.time_constraints, repeat_actions=row.repeat_actions, severity=row.severity) def _retrieve_alarms(self, query): return (self._row_to_alarm_model(x) for x in query.all()) def get_alarms(self, name=None, user=None, state=None, meter=None, project=None, enabled=None, alarm_id=None, alarm_type=None, severity=None, exclude=None): """Yields a lists of alarms that match filters. :param name: Optional name for alarm. :param user: Optional ID for user that owns the resource. :param state: Optional string for alarm state. :param meter: Optional string for alarms associated with meter. :param project: Optional ID for project that owns the resource. :param enabled: Optional boolean to list disable alarm. :param alarm_id: Optional alarm_id to return one alarm. 
:param alarm_type: Optional alarm type. :param severity: Optional alarm severity. :param exclude: Optional dict for inequality constraint. """ session = self._engine_facade.get_session() query = session.query(models.Alarm) if name is not None: query = query.filter(models.Alarm.name == name) if enabled is not None: query = query.filter(models.Alarm.enabled == enabled) if user is not None: query = query.filter(models.Alarm.user_id == user) if project is not None: query = query.filter(models.Alarm.project_id == project) if alarm_id is not None: query = query.filter(models.Alarm.alarm_id == alarm_id) if state is not None: query = query.filter(models.Alarm.state == state) if alarm_type is not None: query = query.filter(models.Alarm.type == alarm_type) if severity is not None: query = query.filter(models.Alarm.severity == severity) if exclude is not None: for key, value in six.iteritems(exclude): query = query.filter(getattr(models.Alarm, key) != value) query = query.order_by(desc(models.Alarm.timestamp)) alarms = self._retrieve_alarms(query) # TODO(cmart): improve this by using sqlalchemy.func factory if meter is not None: alarms = filter(lambda row: row.rule.get('meter_name', None) == meter, alarms) return alarms def create_alarm(self, alarm): """Create an alarm. :param alarm: The alarm to create. """ session = self._engine_facade.get_session() with session.begin(): alarm_row = models.Alarm(alarm_id=alarm.alarm_id) alarm_row.update(alarm.as_dict()) session.add(alarm_row) return self._row_to_alarm_model(alarm_row) def update_alarm(self, alarm): """Update an alarm. :param alarm: the new Alarm to update """ session = self._engine_facade.get_session() with session.begin(): alarm_row = session.merge(models.Alarm(alarm_id=alarm.alarm_id)) alarm_row.update(alarm.as_dict()) return self._row_to_alarm_model(alarm_row) def delete_alarm(self, alarm_id): """Delete an alarm and its history data. :param alarm_id: ID of the alarm to delete """ session = self._engine_facade.get_session() with session.begin(): session.query(models.Alarm).filter( models.Alarm.alarm_id == alarm_id).delete() # FIXME(liusheng): we should use delete cascade session.query(models.AlarmChange).filter( models.AlarmChange.alarm_id == alarm_id).delete() @staticmethod def _row_to_alarm_change_model(row): return alarm_api_models.AlarmChange(event_id=row.event_id, alarm_id=row.alarm_id, type=row.type, detail=row.detail, user_id=row.user_id, project_id=row.project_id, on_behalf_of=row.on_behalf_of, timestamp=row.timestamp) def query_alarms(self, filter_expr=None, orderby=None, limit=None): """Yields a lists of alarms that match filter.""" return self._retrieve_data(filter_expr, orderby, limit, models.Alarm) def _retrieve_alarm_history(self, query): return (self._row_to_alarm_change_model(x) for x in query.all()) def query_alarm_history(self, filter_expr=None, orderby=None, limit=None): """Return an iterable of model.AlarmChange objects.""" return self._retrieve_data(filter_expr, orderby, limit, models.AlarmChange) def get_alarm_changes(self, alarm_id, on_behalf_of, user=None, project=None, alarm_type=None, severity=None, start_timestamp=None, start_timestamp_op=None, end_timestamp=None, end_timestamp_op=None): """Yields list of AlarmChanges describing alarm history Changes are always sorted in reverse order of occurrence, given the importance of currency. Segregation for non-administrative users is done on the basis of the on_behalf_of parameter. 
This allows such users to have visibility on both the changes initiated by themselves directly (generally creation, rule changes, or deletion) and also on those changes initiated on their behalf by the alarming service (state transitions after alarm thresholds are crossed). :param alarm_id: ID of alarm to return changes for :param on_behalf_of: ID of tenant to scope changes query (None for administrative user, indicating all projects) :param user: Optional ID of user to return changes for :param project: Optional ID of project to return changes for :param alarm_type: Optional change type :param severity: Optional alarm severity :param start_timestamp: Optional modified timestamp start range :param start_timestamp_op: Optional timestamp start range operation :param end_timestamp: Optional modified timestamp end range :param end_timestamp_op: Optional timestamp end range operation """ session = self._engine_facade.get_session() query = session.query(models.AlarmChange) query = query.filter(models.AlarmChange.alarm_id == alarm_id) if on_behalf_of is not None: query = query.filter( models.AlarmChange.on_behalf_of == on_behalf_of) if user is not None: query = query.filter(models.AlarmChange.user_id == user) if project is not None: query = query.filter(models.AlarmChange.project_id == project) if alarm_type is not None: query = query.filter(models.AlarmChange.type == alarm_type) if severity is not None: query = query.filter(models.AlarmChange.severity == severity) if start_timestamp: if start_timestamp_op == 'gt': query = query.filter( models.AlarmChange.timestamp > start_timestamp) else: query = query.filter( models.AlarmChange.timestamp >= start_timestamp) if end_timestamp: if end_timestamp_op == 'le': query = query.filter( models.AlarmChange.timestamp <= end_timestamp) else: query = query.filter( models.AlarmChange.timestamp < end_timestamp) query = query.order_by(desc(models.AlarmChange.timestamp)) return self._retrieve_alarm_history(query) def record_alarm_change(self, alarm_change): """Record alarm change event.""" session = self._engine_facade.get_session() with session.begin(): alarm_change_row = models.AlarmChange( event_id=alarm_change['event_id']) alarm_change_row.update(alarm_change) session.add(alarm_change_row) def clear_expired_alarm_history_data(self, alarm_history_ttl): """Clear expired alarm history data from the backend storage system. Clearing occurs according to the time-to-live. :param alarm_history_ttl: Number of seconds to keep alarm history records for. """ session = self._engine_facade.get_session() with session.begin(): valid_start = (timeutils.utcnow() - datetime.timedelta(seconds=alarm_history_ttl)) deleted_rows = (session.query(models.AlarmChange) .filter(models.AlarmChange.timestamp < valid_start) .delete()) LOG.info(_LI("%d alarm histories are removed from database"), deleted_rows) aodh-2.0.6/aodh/storage/impl_hbase.py0000664000567000056710000001530313076064372020641 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
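As a usage sketch of the history-expiration hook above (roughly what the aodh-expirer console script does), assuming the storage options are registered under the [database] group, as the conf.database.* reads elsewhere in this package suggest:

    # Minimal sketch: purge alarm-history rows older than the configured TTL.
    from aodh import service
    from aodh import storage

    conf = service.prepare_service([])
    conn = storage.get_connection_from_config(conf)
    ttl = conf.database.alarm_history_time_to_live
    if ttl > 0:   # <= 0 means history is kept forever
        conn.clear_expired_alarm_history_data(ttl)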
import datetime import operator from oslo_log import log import aodh from aodh.storage import base from aodh.storage.hbase import base as hbase_base from aodh.storage.hbase import migration as hbase_migration from aodh.storage.hbase import utils as hbase_utils from aodh.storage import models LOG = log.getLogger(__name__) AVAILABLE_CAPABILITIES = { 'alarms': {'query': {'simple': True, 'complex': False}, 'history': {'query': {'simple': True, 'complex': False}}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } class Connection(hbase_base.Connection, base.Connection): """Put the alarm data into a HBase database Collections: - alarm: - row_key: uuid of alarm - Column Families: f: contains the raw incoming alarm data - alarm_h: - row_key: uuid of alarm + ":" + reversed timestamp - Column Families: f: raw incoming alarm_history data. Timestamp becomes now() if not determined """ CAPABILITIES = base.update_nested(base.Connection.CAPABILITIES, AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = base.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) _memory_instance = None ALARM_TABLE = "alarm" ALARM_HISTORY_TABLE = "alarm_h" def upgrade(self): tables = [self.ALARM_HISTORY_TABLE, self.ALARM_TABLE] column_families = {'f': dict()} with self.conn_pool.connection() as conn: hbase_utils.create_tables(conn, tables, column_families) hbase_migration.migrate_tables(conn, tables) def clear(self): LOG.debug('Dropping HBase schema...') with self.conn_pool.connection() as conn: for table in [self.ALARM_TABLE, self.ALARM_HISTORY_TABLE]: try: conn.disable_table(table) except Exception: LOG.debug('Cannot disable table but ignoring error') try: conn.delete_table(table) except Exception: LOG.debug('Cannot delete table but ignoring error') def update_alarm(self, alarm): """Create an alarm. :param alarm: The alarm to create. 
It is Alarm object, so we need to call as_dict() """ _id = alarm.alarm_id alarm_to_store = hbase_utils.serialize_entry(alarm.as_dict()) with self.conn_pool.connection() as conn: alarm_table = conn.table(self.ALARM_TABLE) alarm_table.put(_id, alarm_to_store) stored_alarm = hbase_utils.deserialize_entry( alarm_table.row(_id)) return models.Alarm(**stored_alarm) create_alarm = update_alarm def delete_alarm(self, alarm_id): """Delete an alarm and its history data.""" with self.conn_pool.connection() as conn: alarm_table = conn.table(self.ALARM_TABLE) alarm_table.delete(alarm_id) q = hbase_utils.make_query(alarm_id=alarm_id) alarm_history_table = conn.table(self.ALARM_HISTORY_TABLE) for alarm_id, ignored in alarm_history_table.scan(filter=q): alarm_history_table.delete(alarm_id) def get_alarms(self, name=None, user=None, state=None, meter=None, project=None, enabled=None, alarm_id=None, alarm_type=None, severity=None, exclude=None): if meter: raise aodh.NotImplementedError( 'Filter by meter not implemented') q = hbase_utils.make_query(alarm_id=alarm_id, name=name, enabled=enabled, user_id=user, project_id=project, state=state, type=alarm_type, severity=severity, exclude=exclude) with self.conn_pool.connection() as conn: alarm_table = conn.table(self.ALARM_TABLE) gen = alarm_table.scan(filter=q) alarms = [hbase_utils.deserialize_entry(data) for ignored, data in gen] for alarm in sorted( alarms, key=operator.itemgetter('timestamp'), reverse=True): yield models.Alarm(**alarm) def get_alarm_changes(self, alarm_id, on_behalf_of, user=None, project=None, alarm_type=None, severity=None, start_timestamp=None, start_timestamp_op=None, end_timestamp=None, end_timestamp_op=None): q = hbase_utils.make_query(alarm_id=alarm_id, on_behalf_of=on_behalf_of, type=alarm_type, user_id=user, project_id=project, severity=severity) start_row, end_row = hbase_utils.make_timestamp_query( hbase_utils.make_general_rowkey_scan, start=start_timestamp, start_op=start_timestamp_op, end=end_timestamp, end_op=end_timestamp_op, bounds_only=True, some_id=alarm_id) with self.conn_pool.connection() as conn: alarm_history_table = conn.table(self.ALARM_HISTORY_TABLE) gen = alarm_history_table.scan(filter=q, row_start=start_row, row_stop=end_row) for ignored, data in gen: stored_entry = hbase_utils.deserialize_entry(data) yield models.AlarmChange(**stored_entry) def record_alarm_change(self, alarm_change): """Record alarm change event.""" alarm_change_dict = hbase_utils.serialize_entry(alarm_change) ts = alarm_change.get('timestamp') or datetime.datetime.now() rts = hbase_utils.timestamp(ts) with self.conn_pool.connection() as conn: alarm_history_table = conn.table(self.ALARM_HISTORY_TABLE) alarm_history_table.put( hbase_utils.prepare_key(alarm_change.get('alarm_id'), rts), alarm_change_dict) aodh-2.0.6/aodh/storage/models.py0000664000567000056710000001143713076064372020025 0ustar jenkinsjenkins00000000000000# # Copyright 2013 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
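A small sketch of the alarm-history row key used by the HBase driver above (the alarm id and time are made up). Per the class docstring the timestamp component is stored reversed, which is what lets the driver return changes in reverse order of occurrence from a plain scan:

    # Build the "uuid:reversed-timestamp" row key the driver writes
    # alarm-history entries under (helpers from aodh.storage.hbase.utils).
    import datetime
    from aodh.storage.hbase import utils as hbase_utils

    alarm_id = "739e99cb-c2ec-4718-b900-332502355f38"
    rts = hbase_utils.timestamp(datetime.datetime(2016, 4, 1, 12, 0, 0))
    row_key = hbase_utils.prepare_key(alarm_id, rts)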
"""Model classes for use in the storage API. """ import datetime from aodh.i18n import _ from aodh.storage import base class Alarm(base.Model): ALARM_INSUFFICIENT_DATA = 'insufficient data' ALARM_OK = 'ok' ALARM_ALARM = 'alarm' ALARM_ACTIONS_MAP = { ALARM_INSUFFICIENT_DATA: 'insufficient_data_actions', ALARM_OK: 'ok_actions', ALARM_ALARM: 'alarm_actions', } ALARM_LEVEL_LOW = 'low' ALARM_LEVEL_MODERATE = 'moderate' ALARM_LEVEL_CRITICAL = 'critical' """ An alarm to monitor. :param alarm_id: UUID of the alarm :param type: type of the alarm :param name: The Alarm name :param description: User friendly description of the alarm :param enabled: Is the alarm enabled :param state: Alarm state (ok/alarm/insufficient data) :param rule: A rule that defines when the alarm fires :param user_id: the owner/creator of the alarm :param project_id: the project_id of the creator :param evaluation_periods: the number of periods :param period: the time period in seconds :param time_constraints: the list of the alarm's time constraints, if any :param timestamp: the timestamp when the alarm was last updated :param state_timestamp: the timestamp of the last state change :param ok_actions: the list of webhooks to call when entering the ok state :param alarm_actions: the list of webhooks to call when entering the alarm state :param insufficient_data_actions: the list of webhooks to call when entering the insufficient data state :param repeat_actions: Is the actions should be triggered on each alarm evaluation. :param severity: Alarm level (low/moderate/critical) """ def __init__(self, alarm_id, type, enabled, name, description, timestamp, user_id, project_id, state, state_timestamp, ok_actions, alarm_actions, insufficient_data_actions, repeat_actions, rule, time_constraints, severity=None): if not isinstance(timestamp, datetime.datetime): raise TypeError(_("timestamp should be datetime object")) if not isinstance(state_timestamp, datetime.datetime): raise TypeError(_("state_timestamp should be datetime object")) base.Model.__init__( self, alarm_id=alarm_id, type=type, enabled=enabled, name=name, description=description, timestamp=timestamp, user_id=user_id, project_id=project_id, state=state, state_timestamp=state_timestamp, ok_actions=ok_actions, alarm_actions=alarm_actions, insufficient_data_actions=insufficient_data_actions, repeat_actions=repeat_actions, rule=rule, time_constraints=time_constraints, severity=severity) class AlarmChange(base.Model): """Record of an alarm change. 
:param event_id: UUID of the change event :param alarm_id: UUID of the alarm :param type: The type of change :param severity: The severity of alarm :param detail: JSON fragment describing change :param user_id: the user ID of the initiating identity :param project_id: the project ID of the initiating identity :param on_behalf_of: the tenant on behalf of which the change is being made :param timestamp: the timestamp of the change """ CREATION = 'creation' RULE_CHANGE = 'rule change' STATE_TRANSITION = 'state transition' def __init__(self, event_id, alarm_id, type, detail, user_id, project_id, on_behalf_of, severity=None, timestamp=None ): base.Model.__init__( self, event_id=event_id, alarm_id=alarm_id, type=type, severity=severity, detail=detail, user_id=user_id, project_id=project_id, on_behalf_of=on_behalf_of, timestamp=timestamp) aodh-2.0.6/aodh/storage/__init__.py0000664000567000056710000001223213076064372020273 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Storage backend management """ import datetime from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import retrying import six.moves.urllib.parse as urlparse from stevedore import driver import warnings _NAMESPACE = 'aodh.storage' LOG = log.getLogger(__name__) OPTS = [ cfg.IntOpt('alarm_history_time_to_live', default=-1, help=("Number of seconds that alarm histories are kept " "in the database for (<= 0 means forever).")), cfg.StrOpt('alarm_connection', secret=True, deprecated_for_removal=True, help='The connection string used to connect ' 'to the alarm database - rather use ${database.connection}'), ] class StorageBadVersion(Exception): """Error raised when the storage backend version is not good enough.""" def get_connection_from_config(conf): retries = conf.database.max_retries if conf.database.alarm_connection is None: url = conf.database.connection else: url = conf.database.alarm_connection connection_scheme = urlparse.urlparse(url).scheme if connection_scheme not in ('mysql', 'mysql+pymysql', 'postgresql', 'sqlite'): msg = ('Storage backend %s is deprecated, and all the NoSQL backends ' 'will be removed in Aodh 4.0, please use SQL backend.' % connection_scheme) warnings.warn(msg) LOG.debug('looking for %(name)r driver in %(namespace)r', {'name': connection_scheme, 'namespace': _NAMESPACE}) mgr = driver.DriverManager(_NAMESPACE, connection_scheme) # Convert retry_interval secs to msecs for retry decorator @retrying.retry(wait_fixed=conf.database.retry_interval * 1000, stop_max_attempt_number=retries if retries >= 0 else None) def _get_connection(): """Return an open connection to the database.""" return mgr.driver(conf, url) return _get_connection() class SampleFilter(object): """Holds the properties for building a query from a meter/sample filter. :param user: The sample owner. :param project: The sample project. :param start_timestamp: Earliest time point in the request. 
:param start_timestamp_op: Earliest timestamp operation in the request. :param end_timestamp: Latest time point in the request. :param end_timestamp_op: Latest timestamp operation in the request. :param resource: Optional filter for resource id. :param meter: Optional filter for meter type using the meter name. :param source: Optional source filter. :param message_id: Optional sample_id filter. :param metaquery: Optional filter on the metadata """ def __init__(self, user=None, project=None, start_timestamp=None, start_timestamp_op=None, end_timestamp=None, end_timestamp_op=None, resource=None, meter=None, source=None, message_id=None, metaquery=None): self.user = user self.project = project self.start_timestamp = self.sanitize_timestamp(start_timestamp) self.start_timestamp_op = start_timestamp_op self.end_timestamp = self.sanitize_timestamp(end_timestamp) self.end_timestamp_op = end_timestamp_op self.resource = resource self.meter = meter self.source = source self.metaquery = metaquery or {} self.message_id = message_id @staticmethod def sanitize_timestamp(timestamp): """Return a naive utc datetime object.""" if not timestamp: return timestamp if not isinstance(timestamp, datetime.datetime): timestamp = timeutils.parse_isotime(timestamp) return timeutils.normalize_time(timestamp) def __repr__(self): return ("<SampleFilter(user: %s, project: %s, start_timestamp: %s, start_timestamp_op: %s, end_timestamp: %s, end_timestamp_op: %s, resource: %s, meter: %s, source: %s, metaquery: %s, message_id: %s)>" % (self.user, self.project, self.start_timestamp, self.start_timestamp_op, self.end_timestamp, self.end_timestamp_op, self.resource, self.meter, self.source, self.metaquery, self.message_id)) aodh-2.0.6/aodh/storage/pymongo_base.py0000664000567000056710000002761613076064372021222 0ustar jenkinsjenkins00000000000000# # Copyright Ericsson AB 2013. All rights reserved # # Authors: Ildiko Vancsa # Balazs Gibizer # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
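# NOTE (editor's illustrative sketch, not part of the original aodh source):
# a minimal example of how the SampleFilter defined above normalizes its time
# bounds. ISO 8601 strings are parsed and converted to naive UTC datetimes by
# sanitize_timestamp(); the identifier and meter values below are hypothetical.
from aodh.storage import SampleFilter

f = SampleFilter(user='some-user-id',
                 project='some-project-id',
                 start_timestamp='2016-01-01T00:00:00+02:00',
                 end_timestamp='2016-01-02T00:00:00Z',
                 meter='cpu_util')
# Both bounds are now naive UTC datetime objects:
# 2015-12-31 22:00:00 and 2016-01-02 00:00:00.
print(f.start_timestamp, f.end_timestamp)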
"""Common functions for MongoDB backend """ import pymongo import six from aodh.storage import base from aodh.storage import models from aodh.storage.mongo import utils as pymongo_utils COMMON_AVAILABLE_CAPABILITIES = { 'alarms': {'query': {'simple': True, 'complex': True}, 'history': {'query': {'simple': True, 'complex': True}}}, } AVAILABLE_STORAGE_CAPABILITIES = { 'storage': {'production_ready': True}, } class Connection(base.Connection): """Base Alarm Connection class for MongoDB driver.""" CAPABILITIES = base.update_nested(base.Connection.CAPABILITIES, COMMON_AVAILABLE_CAPABILITIES) STORAGE_CAPABILITIES = base.update_nested( base.Connection.STORAGE_CAPABILITIES, AVAILABLE_STORAGE_CAPABILITIES, ) def upgrade(self): # create collection if not present if 'alarm' not in self.db.conn.collection_names(): self.db.conn.create_collection('alarm') if 'alarm_history' not in self.db.conn.collection_names(): self.db.conn.create_collection('alarm_history') def update_alarm(self, alarm): """Update alarm.""" data = alarm.as_dict() self.db.alarm.update( {'alarm_id': alarm.alarm_id}, {'$set': data}, upsert=True) stored_alarm = self.db.alarm.find({'alarm_id': alarm.alarm_id})[0] del stored_alarm['_id'] self._ensure_encapsulated_rule_format(stored_alarm) self._ensure_time_constraints(stored_alarm) return models.Alarm(**stored_alarm) create_alarm = update_alarm def delete_alarm(self, alarm_id): """Delete an alarm and its history data.""" self.db.alarm.remove({'alarm_id': alarm_id}) self.db.alarm_history.remove({'alarm_id': alarm_id}) def record_alarm_change(self, alarm_change): """Record alarm change event.""" self.db.alarm_history.insert(alarm_change.copy()) def get_alarms(self, name=None, user=None, state=None, meter=None, project=None, enabled=None, alarm_id=None, alarm_type=None, severity=None, exclude=None): """Yields a lists of alarms that match filters. :param name: Optional name for alarm. :param user: Optional ID for user that owns the resource. :param state: Optional string for alarm state. :param meter: Optional string for alarms associated with meter. :param project: Optional ID for project that owns the resource. :param enabled: Optional boolean to list disable alarm. :param alarm_id: Optional alarm_id to return one alarm. :param alarm_type: Optional alarm type. :param severity: Optional alarm severity. :param exclude: Optional dict for inequality constraint. """ q = {} if user is not None: q['user_id'] = user if project is not None: q['project_id'] = project if name is not None: q['name'] = name if enabled is not None: q['enabled'] = enabled if alarm_id is not None: q['alarm_id'] = alarm_id if state is not None: q['state'] = state if meter is not None: q['rule.meter_name'] = meter if alarm_type is not None: q['type'] = alarm_type if severity is not None: q['severity'] = severity if exclude is not None: for key, value in six.iteritems(exclude): q[key] = {'$ne': value} return self._retrieve_alarms(q, [("timestamp", pymongo.DESCENDING)], None) def get_alarm_changes(self, alarm_id, on_behalf_of, user=None, project=None, alarm_type=None, severity=None, start_timestamp=None, start_timestamp_op=None, end_timestamp=None, end_timestamp_op=None): """Yields list of AlarmChanges describing alarm history Changes are always sorted in reverse order of occurrence, given the importance of currency. Segregation for non-administrative users is done on the basis of the on_behalf_of parameter. 
This allows such users to have visibility on both the changes initiated by themselves directly (generally creation, rule changes, or deletion) and also on those changes initiated on their behalf by the alarming service (state transitions after alarm thresholds are crossed). :param alarm_id: ID of alarm to return changes for :param on_behalf_of: ID of tenant to scope changes query (None for administrative user, indicating all projects) :param user: Optional ID of user to return changes for :param project: Optional ID of project to return changes for :param alarm_type: Optional change type :param severity: Optional change severity :param start_timestamp: Optional modified timestamp start range :param start_timestamp_op: Optional timestamp start range operation :param end_timestamp: Optional modified timestamp end range :param end_timestamp_op: Optional timestamp end range operation """ q = dict(alarm_id=alarm_id) if on_behalf_of is not None: q['on_behalf_of'] = on_behalf_of if user is not None: q['user_id'] = user if project is not None: q['project_id'] = project if alarm_type is not None: q['type'] = alarm_type if severity is not None: q['severity'] = severity if start_timestamp or end_timestamp: ts_range = pymongo_utils.make_timestamp_range(start_timestamp, end_timestamp, start_timestamp_op, end_timestamp_op) if ts_range: q['timestamp'] = ts_range return self._retrieve_alarm_changes(q, [("timestamp", pymongo.DESCENDING)], None) def query_alarms(self, filter_expr=None, orderby=None, limit=None): """Return an iterable of model.Alarm objects.""" return self._retrieve_data(filter_expr, orderby, limit, models.Alarm) def query_alarm_history(self, filter_expr=None, orderby=None, limit=None): """Return an iterable of model.AlarmChange objects.""" return self._retrieve_data(filter_expr, orderby, limit, models.AlarmChange) def _retrieve_data(self, filter_expr, orderby, limit, model): if limit == 0: return [] query_filter = {} orderby_filter = [("timestamp", pymongo.DESCENDING)] transformer = pymongo_utils.QueryTransformer() if orderby is not None: orderby_filter = transformer.transform_orderby(orderby) if filter_expr is not None: query_filter = transformer.transform_filter(filter_expr) retrieve = {models.Alarm: self._retrieve_alarms, models.AlarmChange: self._retrieve_alarm_changes} return retrieve[model](query_filter, orderby_filter, limit) def _retrieve_alarms(self, query_filter, orderby, limit): if limit is not None: alarms = self.db.alarm.find(query_filter, limit=limit, sort=orderby) else: alarms = self.db.alarm.find(query_filter, sort=orderby) for alarm in alarms: a = {} a.update(alarm) del a['_id'] self._ensure_encapsulated_rule_format(a) self._ensure_time_constraints(a) yield models.Alarm(**a) def _retrieve_alarm_changes(self, query_filter, orderby, limit): if limit is not None: alarms_history = self.db.alarm_history.find(query_filter, limit=limit, sort=orderby) else: alarms_history = self.db.alarm_history.find( query_filter, sort=orderby) for alarm_history in alarms_history: ah = {} ah.update(alarm_history) del ah['_id'] yield models.AlarmChange(**ah) @classmethod def _ensure_encapsulated_rule_format(cls, alarm): """Ensure the alarm returned by the storage have the correct format. 
The previous format looks like: { 'alarm_id': '0ld-4l3rt', 'enabled': True, 'name': 'old-alert', 'description': 'old-alert', 'timestamp': None, 'meter_name': 'cpu', 'user_id': 'me', 'project_id': 'and-da-boys', 'comparison_operator': 'lt', 'threshold': 36, 'statistic': 'count', 'evaluation_periods': 1, 'period': 60, 'state': "insufficient data", 'state_timestamp': None, 'ok_actions': [], 'alarm_actions': ['http://nowhere/alarms'], 'insufficient_data_actions': [], 'repeat_actions': False, 'matching_metadata': {'key': 'value'} # or 'matching_metadata': [{'key': 'key', 'value': 'value'}] } """ if isinstance(alarm.get('rule'), dict): return alarm['type'] = 'threshold' alarm['rule'] = {} alarm['matching_metadata'] = cls._decode_matching_metadata( alarm['matching_metadata']) for field in ['period', 'evaluation_periods', 'threshold', 'statistic', 'comparison_operator', 'meter_name']: if field in alarm: alarm['rule'][field] = alarm[field] del alarm[field] query = [] for key in alarm['matching_metadata']: query.append({'field': key, 'op': 'eq', 'value': alarm['matching_metadata'][key], 'type': 'string'}) del alarm['matching_metadata'] alarm['rule']['query'] = query @staticmethod def _decode_matching_metadata(matching_metadata): if isinstance(matching_metadata, dict): # note(sileht): keep compatibility with alarm # with matching_metadata as a dict return matching_metadata else: new_matching_metadata = {} for elem in matching_metadata: new_matching_metadata[elem['key']] = elem['value'] return new_matching_metadata @staticmethod def _ensure_time_constraints(alarm): """Ensures the alarm has a time constraints field.""" if 'time_constraints' not in alarm: alarm['time_constraints'] = [] aodh-2.0.6/aodh/storage/impl_mongodb.py0000664000567000056710000001020413076064372021177 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # Copyright 2013 eNovance # Copyright 2014-2015 Red Hat, Inc # # Authors: Doug Hellmann # Julien Danjou # Eoghan Glynn # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """MongoDB storage backend""" from oslo_log import log import pymongo from aodh import storage from aodh.storage.mongo import utils as pymongo_utils from aodh.storage import pymongo_base LOG = log.getLogger(__name__) class Connection(pymongo_base.Connection): """Put the alarm data into a MongoDB database.""" CONNECTION_POOL = pymongo_utils.ConnectionPool() def __init__(self, conf, url): self.conf = conf # NOTE(jd) Use our own connection pooling on top of the Pymongo one./ # We need that otherwise we overflow the MongoDB instance with new # connection since we instantiate a Pymongo client each time someone # requires a new storage connection. 
self.conn = self.CONNECTION_POOL.connect( url, conf.database.max_retries, conf.database.retry_interval) # Require MongoDB 2.4 to use $setOnInsert if self.conn.server_info()['versionArray'] < [2, 4]: raise storage.StorageBadVersion("Need at least MongoDB 2.4") connection_options = pymongo.uri_parser.parse_uri(url) self.db = getattr(self.conn, connection_options['database']) if connection_options.get('username'): self.db.authenticate(connection_options['username'], connection_options['password']) # NOTE(jd) Upgrading is just about creating index, so let's do this # on connection to be sure at least the TTL is correctly updated if # needed. self.upgrade() @staticmethod def update_ttl(ttl, ttl_index_name, index_field, coll): """Update or ensure time_to_live indexes. :param ttl: time to live in seconds. :param ttl_index_name: name of the index we want to update or ensure. :param index_field: field with the index that we need to update. :param coll: collection which indexes need to be updated. """ indexes = coll.index_information() if ttl <= 0: if ttl_index_name in indexes: coll.drop_index(ttl_index_name) return if ttl_index_name in indexes: return coll.database.command( 'collMod', coll.name, index={'keyPattern': {index_field: pymongo.ASCENDING}, 'expireAfterSeconds': ttl}) coll.ensure_index([(index_field, pymongo.ASCENDING)], expireAfterSeconds=ttl, name=ttl_index_name) def upgrade(self): super(Connection, self).upgrade() # Establish indexes ttl = self.conf.database.alarm_history_time_to_live self.update_ttl( ttl, 'alarm_history_ttl', 'timestamp', self.db.alarm_history) def clear(self): self.conn.drop_database(self.db.name) # Connection will be reopened automatically if needed self.conn.close() def clear_expired_alarm_history_data(self, alarm_history_ttl): """Clear expired alarm history data from the backend storage system. Clearing occurs according to the time-to-live. :param alarm_history_ttl: Number of seconds to keep alarm history records for. """ LOG.debug("Clearing expired alarm history data is based on native " "MongoDB time to live feature and going in background.") aodh-2.0.6/aodh/storage/base.py0000664000567000056710000002101713076064372017447 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base classes for storage engines """ import copy import inspect import six import aodh def dict_to_keyval(value, key_base=None): """Expand a given dict to its corresponding key-value pairs. Generated keys are fully qualified, delimited using dot notation. ie. key = 'key.child_key.grandchild_key[0]' """ val_iter, key_func = None, None if isinstance(value, dict): val_iter = six.iteritems(value) key_func = lambda k: key_base + '.' 
+ k if key_base else k elif isinstance(value, (tuple, list)): val_iter = enumerate(value) key_func = lambda k: key_base + '[%d]' % k if val_iter: for k, v in val_iter: key_gen = key_func(k) if isinstance(v, dict) or isinstance(v, (tuple, list)): for key_gen, v in dict_to_keyval(v, key_gen): yield key_gen, v else: yield key_gen, v def update_nested(original_dict, updates): """Updates the leaf nodes in a nest dict. Updates occur without replacing entire sub-dicts. """ dict_to_update = copy.deepcopy(original_dict) for key, value in six.iteritems(updates): if isinstance(value, dict): sub_dict = update_nested(dict_to_update.get(key, {}), value) dict_to_update[key] = sub_dict else: dict_to_update[key] = updates[key] return dict_to_update class Model(object): """Base class for storage API models.""" def __init__(self, **kwds): self.fields = list(kwds) for k, v in six.iteritems(kwds): setattr(self, k, v) def as_dict(self): d = {} for f in self.fields: v = getattr(self, f) if isinstance(v, Model): v = v.as_dict() elif isinstance(v, list) and v and isinstance(v[0], Model): v = [sub.as_dict() for sub in v] d[f] = v return d def __eq__(self, other): return self.as_dict() == other.as_dict() @classmethod def get_field_names(cls): fields = inspect.getargspec(cls.__init__)[0] return set(fields) - set(["self"]) class Connection(object): """Base class for alarm storage system connections.""" # A dictionary representing the capabilities of this driver. CAPABILITIES = { 'alarms': {'query': {'simple': False, 'complex': False}, 'history': {'query': {'simple': False, 'complex': False}}}, } STORAGE_CAPABILITIES = { 'storage': {'production_ready': False}, } def __init__(self, conf, url): pass @staticmethod def upgrade(): """Migrate the database to `version` or the most recent version.""" @staticmethod def get_alarms(name=None, user=None, state=None, meter=None, project=None, enabled=None, alarm_id=None, alarm_type=None, severity=None, exclude=None): """Yields a lists of alarms that match filters. :param name: Optional name for alarm. :param user: Optional ID for user that owns the resource. :param state: Optional string for alarm state. :param meter: Optional string for alarms associated with meter. :param project: Optional ID for project that owns the resource. :param enabled: Optional boolean to list disable alarm. :param alarm_id: Optional alarm_id to return one alarm. :param alarm_type: Optional alarm type. :param severity: Optional alarm severity. :param exclude: Optional dict for inequality constraint. """ raise aodh.NotImplementedError('Alarms not implemented') @staticmethod def create_alarm(alarm): """Create an alarm. Returns the alarm as created. :param alarm: The alarm to create. """ raise aodh.NotImplementedError('Alarms not implemented') @staticmethod def update_alarm(alarm): """Update alarm.""" raise aodh.NotImplementedError('Alarms not implemented') @staticmethod def delete_alarm(alarm_id): """Delete an alarm and its history data.""" raise aodh.NotImplementedError('Alarms not implemented') @staticmethod def get_alarm_changes(alarm_id, on_behalf_of, user=None, project=None, alarm_type=None, severity=None, start_timestamp=None, start_timestamp_op=None, end_timestamp=None, end_timestamp_op=None): """Yields list of AlarmChanges describing alarm history Changes are always sorted in reverse order of occurrence, given the importance of currency. Segregation for non-administrative users is done on the basis of the on_behalf_of parameter. 
This allows such users to have visibility on both the changes initiated by themselves directly (generally creation, rule changes, or deletion) and also on those changes initiated on their behalf by the alarming service (state transitions after alarm thresholds are crossed). :param alarm_id: ID of alarm to return changes for :param on_behalf_of: ID of tenant to scope changes query (None for administrative user, indicating all projects) :param user: Optional ID of user to return changes for :param project: Optional ID of project to return changes for :param alarm_type: Optional change type :param severity: Optional change severity :param start_timestamp: Optional modified timestamp start range :param start_timestamp_op: Optional timestamp start range operation :param end_timestamp: Optional modified timestamp end range :param end_timestamp_op: Optional timestamp end range operation """ raise aodh.NotImplementedError('Alarm history not implemented') @staticmethod def record_alarm_change(alarm_change): """Record alarm change event.""" raise aodh.NotImplementedError('Alarm history not implemented') @staticmethod def clear(): """Clear database.""" @staticmethod def query_alarms(filter_expr=None, orderby=None, limit=None): """Return an iterable of model.Alarm objects. :param filter_expr: Filter expression for query. :param orderby: List of field name and direction pairs for order by. :param limit: Maximum number of results to return. """ raise aodh.NotImplementedError('Complex query for alarms ' 'is not implemented.') @staticmethod def query_alarm_history(filter_expr=None, orderby=None, limit=None): """Return an iterable of model.AlarmChange objects. :param filter_expr: Filter expression for query. :param orderby: List of field name and direction pairs for order by. :param limit: Maximum number of results to return. """ raise aodh.NotImplementedError('Complex query for alarms ' 'history is not implemented.') @classmethod def get_capabilities(cls): """Return an dictionary with the capabilities of each driver.""" return cls.CAPABILITIES @classmethod def get_storage_capabilities(cls): """Return a dictionary representing the performance capabilities. This is needed to evaluate the performance of each driver. """ return cls.STORAGE_CAPABILITIES @staticmethod def clear_expired_alarm_history_data(alarm_history_ttl): """Clear expired alarm history data from the backend storage system. Clearing occurs according to the time-to-live. :param alarm_history_ttl: Number of seconds to keep alarm history records for. """ raise aodh.NotImplementedError('Clearing alarm history ' 'not implemented') aodh-2.0.6/aodh/storage/sqlalchemy/0000775000567000056710000000000013076064720020321 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/storage/sqlalchemy/utils.py0000664000567000056710000000656013076064372022045 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
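# NOTE (editor's illustrative sketch, not part of the original aodh source):
# how a storage driver typically advertises its feature set. It merges its own
# flags over base.Connection.CAPABILITIES with update_nested(), in the same way
# the MongoDB driver above does; the class and flag values here are made up.
from aodh.storage import base

DRIVER_CAPABILITIES = {
    'alarms': {'query': {'simple': True, 'complex': False}},
}


class ExampleConnection(base.Connection):
    """Hypothetical driver used only to illustrate capability merging."""
    CAPABILITIES = base.update_nested(base.Connection.CAPABILITIES,
                                      DRIVER_CAPABILITIES)


# Leaves not mentioned in DRIVER_CAPABILITIES keep the base-class values:
# {'alarms': {'query': {'simple': True, 'complex': False},
#             'history': {'query': {'simple': False, 'complex': False}}}}
print(ExampleConnection.get_capabilities())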
# import operator from sqlalchemy import and_ from sqlalchemy import asc from sqlalchemy import desc from sqlalchemy import not_ from sqlalchemy import or_ class QueryTransformer(object): operators = {"=": operator.eq, "<": operator.lt, ">": operator.gt, "<=": operator.le, "=<": operator.le, ">=": operator.ge, "=>": operator.ge, "!=": operator.ne, "in": lambda field_name, values: field_name.in_(values), "=~": lambda field, value: field.op("regexp")(value)} # operators which are differs for different dialects dialect_operators = {'postgresql': {'=~': (lambda field, value: field.op("~")(value))}} complex_operators = {"or": or_, "and": and_, "not": not_} ordering_functions = {"asc": asc, "desc": desc} def __init__(self, table, query, dialect='mysql'): self.table = table self.query = query self.dialect_name = dialect def _get_operator(self, op): return (self.dialect_operators.get(self.dialect_name, {}).get(op) or self.operators[op]) def _handle_complex_op(self, complex_op, nodes): op = self.complex_operators[complex_op] if op == not_: nodes = [nodes] element_list = [] for node in nodes: element = self._transform(node) element_list.append(element) return op(*element_list) def _handle_simple_op(self, simple_op, nodes): op = self._get_operator(simple_op) field_name, value = list(nodes.items())[0] return op(getattr(self.table, field_name), value) def _transform(self, sub_tree): operator, nodes = list(sub_tree.items())[0] if operator in self.complex_operators: return self._handle_complex_op(operator, nodes) else: return self._handle_simple_op(operator, nodes) def apply_filter(self, expression_tree): condition = self._transform(expression_tree) self.query = self.query.filter(condition) def apply_options(self, orderby, limit): self._apply_order_by(orderby) if limit is not None: self.query = self.query.limit(limit) def _apply_order_by(self, orderby): if orderby is not None: for field in orderby: attr, order = list(field.items())[0] ordering_function = self.ordering_functions[order] self.query = self.query.order_by(ordering_function( getattr(self.table, attr))) else: self.query = self.query.order_by(desc(self.table.timestamp)) def get_query(self): return self.query aodh-2.0.6/aodh/storage/sqlalchemy/models.py0000664000567000056710000001154613076064372022170 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ SQLAlchemy models for aodh data. 
""" import calendar import datetime import decimal import json from oslo_utils import timeutils from oslo_utils import units import six from sqlalchemy import Column, String, Index, Boolean, Text, DateTime from sqlalchemy.dialects.mysql import DECIMAL from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.types import TypeDecorator class JSONEncodedDict(TypeDecorator): """Represents an immutable structure as a json-encoded string.""" impl = Text @staticmethod def process_bind_param(value, dialect): if value is not None: value = json.dumps(value) return value @staticmethod def process_result_value(value, dialect): if value is not None: value = json.loads(value) return value class PreciseTimestamp(TypeDecorator): """Represents a timestamp precise to the microsecond.""" impl = DateTime def load_dialect_impl(self, dialect): if dialect.name == 'mysql': return dialect.type_descriptor(DECIMAL(precision=20, scale=6, asdecimal=True)) return dialect.type_descriptor(self.impl) @staticmethod def process_bind_param(value, dialect): if value is None: return value elif dialect.name == 'mysql': decimal.getcontext().prec = 30 return ( decimal.Decimal( str(calendar.timegm(value.utctimetuple()))) + (decimal.Decimal(str(value.microsecond)) / decimal.Decimal("1000000.0")) ) return value def compare_against_backend(self, dialect, conn_type): if dialect.name == 'mysql': return issubclass(type(conn_type), DECIMAL) return issubclass(type(conn_type), DateTime) @staticmethod def process_result_value(value, dialect): if value is None: return value elif dialect.name == 'mysql': integer = int(value) micro = (value - decimal.Decimal(integer)) * decimal.Decimal(units.M) daittyme = datetime.datetime.utcfromtimestamp(integer) return daittyme.replace(microsecond=int(round(micro))) return value class AodhBase(object): """Base class for Aodh Models.""" __table_args__ = {'mysql_charset': "utf8", 'mysql_engine': "InnoDB"} __table_initialized__ = False def __setitem__(self, key, value): setattr(self, key, value) def __getitem__(self, key): return getattr(self, key) def update(self, values): """Make the model object behave like a dict.""" for k, v in six.iteritems(values): setattr(self, k, v) Base = declarative_base(cls=AodhBase) class Alarm(Base): """Define Alarm data.""" __tablename__ = 'alarm' __table_args__ = ( Index('ix_alarm_user_id', 'user_id'), Index('ix_alarm_project_id', 'project_id'), ) alarm_id = Column(String(128), primary_key=True) enabled = Column(Boolean) name = Column(Text) type = Column(String(50)) severity = Column(String(50)) description = Column(Text) timestamp = Column(PreciseTimestamp, default=lambda: timeutils.utcnow()) user_id = Column(String(128)) project_id = Column(String(128)) state = Column(String(255)) state_timestamp = Column(PreciseTimestamp, default=lambda: timeutils.utcnow()) ok_actions = Column(JSONEncodedDict) alarm_actions = Column(JSONEncodedDict) insufficient_data_actions = Column(JSONEncodedDict) repeat_actions = Column(Boolean) rule = Column(JSONEncodedDict) time_constraints = Column(JSONEncodedDict) class AlarmChange(Base): """Define AlarmChange data.""" __tablename__ = 'alarm_history' __table_args__ = ( Index('ix_alarm_history_alarm_id', 'alarm_id'), ) event_id = Column(String(128), primary_key=True) alarm_id = Column(String(128)) on_behalf_of = Column(String(128)) project_id = Column(String(128)) user_id = Column(String(128)) type = Column(String(20)) detail = Column(Text) timestamp = Column(PreciseTimestamp, default=lambda: timeutils.utcnow()) severity = 
Column(String(50)) aodh-2.0.6/aodh/storage/sqlalchemy/alembic/0000775000567000056710000000000013076064720021715 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/storage/sqlalchemy/alembic/alembic.ini0000664000567000056710000000105313076064371024013 0ustar jenkinsjenkins00000000000000[alembic] script_location = aodh.storage.sqlalchemy:alembic sqlalchemy.url = [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = WARN handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S aodh-2.0.6/aodh/storage/sqlalchemy/alembic/versions/0000775000567000056710000000000013076064720023565 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/storage/sqlalchemy/alembic/versions/12fe8fac9fe4_initial_base.py0000664000567000056710000000705213076064372030640 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """initial base Revision ID: 12fe8fac9fe4 Revises: Create Date: 2015-07-28 17:38:37.022899 """ # revision identifiers, used by Alembic. 
revision = '12fe8fac9fe4' down_revision = None branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa import aodh.storage.sqlalchemy.models def upgrade(): op.create_table( 'alarm_history', sa.Column('event_id', sa.String(length=128), nullable=False), sa.Column('alarm_id', sa.String(length=128), nullable=True), sa.Column('on_behalf_of', sa.String(length=128), nullable=True), sa.Column('project_id', sa.String(length=128), nullable=True), sa.Column('user_id', sa.String(length=128), nullable=True), sa.Column('type', sa.String(length=20), nullable=True), sa.Column('detail', sa.Text(), nullable=True), sa.Column('timestamp', aodh.storage.sqlalchemy.models.PreciseTimestamp(), nullable=True), sa.PrimaryKeyConstraint('event_id') ) op.create_index( 'ix_alarm_history_alarm_id', 'alarm_history', ['alarm_id'], unique=False) op.create_table( 'alarm', sa.Column('alarm_id', sa.String(length=128), nullable=False), sa.Column('enabled', sa.Boolean(), nullable=True), sa.Column('name', sa.Text(), nullable=True), sa.Column('type', sa.String(length=50), nullable=True), sa.Column('severity', sa.String(length=50), nullable=True), sa.Column('description', sa.Text(), nullable=True), sa.Column('timestamp', aodh.storage.sqlalchemy.models.PreciseTimestamp(), nullable=True), sa.Column('user_id', sa.String(length=128), nullable=True), sa.Column('project_id', sa.String(length=128), nullable=True), sa.Column('state', sa.String(length=255), nullable=True), sa.Column('state_timestamp', aodh.storage.sqlalchemy.models.PreciseTimestamp(), nullable=True), sa.Column('ok_actions', aodh.storage.sqlalchemy.models.JSONEncodedDict(), nullable=True), sa.Column('alarm_actions', aodh.storage.sqlalchemy.models.JSONEncodedDict(), nullable=True), sa.Column('insufficient_data_actions', aodh.storage.sqlalchemy.models.JSONEncodedDict(), nullable=True), sa.Column('repeat_actions', sa.Boolean(), nullable=True), sa.Column('rule', aodh.storage.sqlalchemy.models.JSONEncodedDict(), nullable=True), sa.Column('time_constraints', aodh.storage.sqlalchemy.models.JSONEncodedDict(), nullable=True), sa.PrimaryKeyConstraint('alarm_id') ) op.create_index( 'ix_alarm_project_id', 'alarm', ['project_id'], unique=False) op.create_index( 'ix_alarm_user_id', 'alarm', ['user_id'], unique=False) aodh-2.0.6/aodh/storage/sqlalchemy/alembic/versions/bb07adac380_add_severity_to_alarm_history.py0000664000567000056710000000204013076064371034134 0ustar jenkinsjenkins00000000000000# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """add severity to alarm history Revision ID: bb07adac380 Revises: 12fe8fac9fe4 Create Date: 2015-08-06 15:15:43.717068 """ # revision identifiers, used by Alembic. 
revision = 'bb07adac380' down_revision = '12fe8fac9fe4' branch_labels = None depends_on = None from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('alarm_history', sa.Column('severity', sa.String(length=50), nullable=True)) aodh-2.0.6/aodh/storage/sqlalchemy/alembic/script.py.mako0000664000567000056710000000204513076064371024524 0ustar jenkinsjenkins00000000000000# Copyright ${create_date.year} OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """${message} Revision ID: ${up_revision} Revises: ${down_revision | comma,n} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} branch_labels = ${repr(branch_labels)} depends_on = ${repr(depends_on)} from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} aodh-2.0.6/aodh/storage/sqlalchemy/alembic/env.py0000664000567000056710000000527113076064371023066 0ustar jenkinsjenkins00000000000000# # Copyright 2015 Huawei Technologies Co., Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import with_statement from alembic import context from logging.config import fileConfig from aodh.storage import impl_sqlalchemy from aodh.storage.sqlalchemy import models # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging. # This line sets up loggers basically. fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel # target_metadata = mymodel.Base.metadata target_metadata = models.Base.metadata # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_offline(): """Run migrations in 'offline' mode. This configures the context with just a URL and not an Engine, though an Engine is acceptable here as well. By skipping the Engine creation we don't even need a DBAPI to be available. Calls to context.execute() here emit the given string to the script output. """ conf = config.conf context.configure(url=conf.database.connection, target_metadata=target_metadata) with context.begin_transaction(): context.run_migrations() def run_migrations_online(): """Run migrations in 'online' mode. 
In this scenario we need to create an Engine and associate a connection with the context. """ conf = config.conf conn = impl_sqlalchemy.Connection(conf, conf.database.connection) connectable = conn._engine_facade.get_engine() with connectable.connect() as connection: context.configure( connection=connection, target_metadata=target_metadata ) with context.begin_transaction(): context.run_migrations() conn.disconnect() if not hasattr(config, "conf"): from aodh import service config.conf = service.prepare_service([]) if context.is_offline_mode(): run_migrations_offline() else: run_migrations_online() aodh-2.0.6/aodh/storage/sqlalchemy/__init__.py0000664000567000056710000000000013076064371022422 0ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/storage/hbase/0000775000567000056710000000000013076064720017241 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/storage/hbase/migration.py0000664000567000056710000000265613076064372021620 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """HBase storage backend migrations """ from aodh.storage.hbase import utils as hbase_utils def migrate_alarm_history_table(conn, table): """Migrate table 'alarm_h' in HBase. Change row format from ""%s_%s" % alarm_id, rts, to new separator format "%s:%s" % alarm_id, rts """ alarm_h_table = conn.table(table) alarm_h_filter = "RowFilter(=, 'regexstring:\\w*_\\d{19}')" gen = alarm_h_table.scan(filter=alarm_h_filter) for row, data in gen: row_parts = row.rsplit('_', 1) alarm_h_table.put(hbase_utils.prepare_key(*row_parts), data) alarm_h_table.delete(row) TABLE_MIGRATION_FUNCS = {'alarm_h': migrate_alarm_history_table} def migrate_tables(conn, tables): if type(tables) is not list: tables = [tables] for table in tables: if table in TABLE_MIGRATION_FUNCS: TABLE_MIGRATION_FUNCS.get(table)(conn, table) aodh-2.0.6/aodh/storage/hbase/utils.py0000664000567000056710000002033513076064372020761 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
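# NOTE (editor's illustrative sketch, not part of the original aodh source):
# what migrate_alarm_history_table() above rewrites. The 'alarm_h' row key
# moves from an underscore-separated to a colon-separated form built with
# hbase_utils.prepare_key(); the alarm id and reversed timestamp are made up.
from aodh.storage.hbase import utils as hbase_utils

alarm_id = '0ld-4l3rt'
rts = 9223372036854775807  # reversed-timestamp half of the old row key
old_row = '%s_%s' % (alarm_id, rts)               # pre-migration format
new_row = hbase_utils.prepare_key(alarm_id, rts)  # '0ld-4l3rt:9223372036854775807'
print(old_row, '->', new_row)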
""" Various HBase helpers """ import copy import datetime import json import bson.json_util from happybase.hbase import ttypes from oslo_log import log import six from aodh.i18n import _ LOG = log.getLogger(__name__) OP_SIGN = {'eq': '=', 'lt': '<', 'le': '<=', 'ne': '!=', 'gt': '>', 'ge': '>='} # We need this additional dictionary because we have reverted timestamp in # row-keys for stored metrics OP_SIGN_REV = {'eq': '=', 'lt': '>', 'le': '>=', 'ne': '!=', 'gt': '<', 'ge': '<='} def timestamp(dt, reverse=True): """Timestamp is count of milliseconds since start of epoch. If reverse=True then timestamp will be reversed. Such a technique is used in HBase rowkey design when period queries are required. Because of the fact that rows are sorted lexicographically it's possible to vary whether the 'oldest' entries will be on top of the table or it should be the newest ones (reversed timestamp case). :param dt: datetime which is translated to timestamp :param reverse: a boolean parameter for reverse or straight count of timestamp in milliseconds :return: count or reversed count of milliseconds since start of epoch """ epoch = datetime.datetime(1970, 1, 1) td = dt - epoch ts = td.microseconds + td.seconds * 1000000 + td.days * 86400000000 return 0x7fffffffffffffff - ts if reverse else ts def make_timestamp_query(func, start=None, start_op=None, end=None, end_op=None, bounds_only=False, **kwargs): """Return a filter start and stop row for filtering and a query. Query is based on the fact that CF-name is 'rts'. :param start: Optional start timestamp :param start_op: Optional start timestamp operator, like gt, ge :param end: Optional end timestamp :param end_op: Optional end timestamp operator, like lt, le :param bounds_only: if True than query will not be returned :param func: a function that provide a format of row :param kwargs: kwargs for :param func """ # We don't need to dump here because get_start_end_rts returns strings rts_start, rts_end = get_start_end_rts(start, end) start_row, end_row = func(rts_start, rts_end, **kwargs) if bounds_only: return start_row, end_row q = [] start_op = start_op or 'ge' end_op = end_op or 'lt' if rts_start: q.append("SingleColumnValueFilter ('f', 'rts', %s, 'binary:%s')" % (OP_SIGN_REV[start_op], rts_start)) if rts_end: q.append("SingleColumnValueFilter ('f', 'rts', %s, 'binary:%s')" % (OP_SIGN_REV[end_op], rts_end)) res_q = None if len(q): res_q = " AND ".join(q) return start_row, end_row, res_q def get_start_end_rts(start, end): rts_start = str(timestamp(start)) if start else "" rts_end = str(timestamp(end)) if end else "" return rts_start, rts_end def make_query(**kwargs): """Return a filter query string based on the selected parameters. :param kwargs: key-value pairs to filter on. Key should be a real column name in db """ q = [] # Note: we use extended constructor for SingleColumnValueFilter here. # It is explicitly specified that entry should not be returned if CF is not # found in table. 
for key, value in sorted(kwargs.items()): if value is not None: if key == 'source': q.append("SingleColumnValueFilter " "('f', 's_%s', =, 'binary:%s', true, true)" % (value, dump('1'))) elif key == 'trait_type': q.append("ColumnPrefixFilter('%s')" % value) elif key == 'event_id': q.append("RowFilter ( = , 'regexstring:\d*:%s')" % value) elif key == 'exclude': for k, v in six.iteritems(value): q.append("SingleColumnValueFilter " "('f', '%(k)s', !=, 'binary:%(v)s', true, true)" % {'k': quote(k), 'v': dump(v)}) else: q.append("SingleColumnValueFilter " "('f', '%s', =, 'binary:%s', true, true)" % (quote(key), dump(value))) if len(q): return " AND ".join(q) def make_general_rowkey_scan(rts_start=None, rts_end=None, some_id=None): """If it's filter on some_id without start and end. start_row = some_id while end_row = some_id + MAX_BYTE. """ if some_id is None: return None, None if not rts_start: # NOTE(idegtiarov): Here we could not use chr > 122 because chr >= 123 # will be quoted and character will be turn in a composition that is # started with '%' (chr(37)) that lexicographically is less then chr # of number rts_start = chr(122) end_row = prepare_key(some_id, rts_start) start_row = prepare_key(some_id, rts_end) return start_row, end_row def prepare_key(*args): """Prepares names for rows and columns with correct separator. :param args: strings or numbers that we want our key construct of :return: key with quoted args that are separated with character ":" """ key_quote = [] for key in args: if isinstance(key, six.integer_types): key = str(key) key_quote.append(quote(key)) return ":".join(key_quote) def deserialize_entry(entry): """Return a list of flatten results. Result contains a dict of simple structures such as 'resource_id':1 :param entry: entry from HBase, without row name and timestamp """ flatten_result = {} for k, v in entry.items(): if ':' in k[2:]: key = tuple([unquote(i) for i in k[2:].split(':')]) else: key = unquote(k[2:]) flatten_result[key] = load(v) return flatten_result def serialize_entry(data=None, **kwargs): """Return a dict that is ready to be stored to HBase :param data: dict to be serialized :param kwargs: additional args """ data = data or {} entry_dict = copy.copy(data) entry_dict.update(**kwargs) result = {} for k, v in entry_dict.items(): result['f:' + quote(k, ':')] = dump(v) return result def dump(data): return json.dumps(data, default=bson.json_util.default) def load(data): return json.loads(data, object_hook=object_hook) # We don't want to have tzinfo in decoded json.This object_hook is # overwritten json_util.object_hook for $date def object_hook(dct): if "$date" in dct: dt = bson.json_util.object_hook(dct) return dt.replace(tzinfo=None) return bson.json_util.object_hook(dct) def create_tables(conn, tables, column_families): for table in tables: try: conn.create_table(table, column_families) except ttypes.AlreadyExists: if conn.table_prefix: table = ("%(table_prefix)s" "%(separator)s" "%(table_name)s" % dict(table_prefix=conn.table_prefix, separator=conn.table_prefix_separator, table_name=table)) LOG.warning(_("Cannot create table %(table_name)s " "it already exists. Ignoring error") % {'table_name': table}) def quote(s, *args): """Return quoted string even if it is unicode one. :param s: string that should be quoted :param args: any symbol we want to stay unquoted """ s_en = s.encode('utf8') return six.moves.urllib.parse.quote(s_en, *args) def unquote(s): """Return unquoted and decoded string. 
:param s: string that should be unquoted """ s_de = six.moves.urllib.parse.unquote(s) return s_de.decode('utf8') aodh-2.0.6/aodh/storage/hbase/__init__.py0000664000567000056710000000000013076064372021343 0ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/storage/hbase/inmemory.py0000664000567000056710000002261513076064372021463 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This is a very crude version of "in-memory HBase", which implements just enough functionality of HappyBase API to support testing of our driver. """ import copy import re from oslo_log import log import six import aodh LOG = log.getLogger(__name__) class MTable(object): """HappyBase.Table mock.""" def __init__(self, name, families): self.name = name self.families = families self._rows_with_ts = {} def row(self, key, columns=None): if key not in self._rows_with_ts: return {} res = copy.copy(sorted(six.iteritems( self._rows_with_ts.get(key)))[-1][1]) if columns: keys = res.keys() for key in keys: if key not in columns: res.pop(key) return res def rows(self, keys): return ((k, self.row(k)) for k in keys) def put(self, key, data, ts=None): # Note: Now we use 'timestamped' but only for one Resource table. # That's why we may put ts='0' in case when ts is None. If it is # needed to use 2 types of put in one table ts=0 cannot be used. if ts is None: ts = "0" if key not in self._rows_with_ts: self._rows_with_ts[key] = {ts: data} else: if ts in self._rows_with_ts[key]: self._rows_with_ts[key][ts].update(data) else: self._rows_with_ts[key].update({ts: data}) def delete(self, key): del self._rows_with_ts[key] def _get_latest_dict(self, row): # The idea here is to return latest versions of columns. # In _rows_with_ts we store {row: {ts_1: {data}, ts_2: {data}}}. # res will contain a list of tuples [(ts_1, {data}), (ts_2, {data})] # sorted by ts, i.e. in this list ts_2 is the most latest. 
# To get result as HBase provides we should iterate in reverse order # and get from "latest" data only key-values that are not in newer data data = {} for i in sorted(six.iteritems(self._rows_with_ts[row])): data.update(i[1]) return data def scan(self, filter=None, columns=None, row_start=None, row_stop=None, limit=None): columns = columns or [] sorted_keys = sorted(self._rows_with_ts) # copy data between row_start and row_stop into a dict rows = {} for row in sorted_keys: if row_start and row < row_start: continue if row_stop and row > row_stop: break rows[row] = self._get_latest_dict(row) if columns: ret = {} for row, data in six.iteritems(rows): for key in data: if key in columns: ret[row] = data rows = ret if filter: # TODO(jdanjou): we should really parse this properly, # but at the moment we are only going to support AND here filters = filter.split('AND') for f in filters: # Extract filter name and its arguments g = re.search("(.*)\((.*),?\)", f) fname = g.group(1).strip() fargs = [s.strip().replace('\'', '') for s in g.group(2).split(',')] m = getattr(self, fname) if callable(m): # overwrite rows for filtering to take effect # in case of multiple filters rows = m(fargs, rows) else: raise aodh.NotImplementedError( "%s filter is not implemented, " "you may want to add it!") for k in sorted(rows)[:limit]: yield k, rows[k] @staticmethod def SingleColumnValueFilter(args, rows): """This is filter for testing "in-memory HBase". This method is called from scan() when 'SingleColumnValueFilter' is found in the 'filter' argument. """ op = args[2] column = "%s:%s" % (args[0], args[1]) value = args[3] if value.startswith('binary:'): value = value[7:] r = {} for row in rows: data = rows[row] if op == '=': if column in data and data[column] == value: r[row] = data elif op == '<': if column in data and data[column] < value: r[row] = data elif op == '<=': if column in data and data[column] <= value: r[row] = data elif op == '>': if column in data and data[column] > value: r[row] = data elif op == '>=': if column in data and data[column] >= value: r[row] = data elif op == '!=': if column in data and data[column] != value: r[row] = data return r @staticmethod def ColumnPrefixFilter(args, rows): """This is filter for testing "in-memory HBase". This method is called from scan() when 'ColumnPrefixFilter' is found in the 'filter' argument. :param args: a list of filter arguments, contain prefix of column :param rows: a dict of row prefixes for filtering """ value = args[0] column = 'f:' + value r = {} for row, data in rows.items(): column_dict = {} for key in data: if key.startswith(column): column_dict[key] = data[key] r[row] = column_dict return r @staticmethod def RowFilter(args, rows): """This is filter for testing "in-memory HBase". This method is called from scan() when 'RowFilter' is found in the 'filter' argument. :param args: a list of filter arguments, it contains operator and sought string :param rows: a dict of rows which are filtered """ op = args[0] value = args[1] if value.startswith('binary:'): value = value[len('binary:'):] if value.startswith('regexstring:'): value = value[len('regexstring:'):] r = {} for row, data in rows.items(): try: g = re.search(value, row).group() if op == '=': if g == row: r[row] = data else: raise aodh.NotImplementedError( "In-memory " "RowFilter doesn't support " "the %s operation yet" % op) except AttributeError: pass return r @staticmethod def QualifierFilter(args, rows): """This is filter for testing "in-memory HBase". 
This method is called from scan() when 'QualifierFilter' is found in the 'filter' argument """ op = args[0] value = args[1] is_regex = False if value.startswith('binaryprefix:'): value = value[len('binaryprefix:'):] if value.startswith('regexstring:'): value = value[len('regexstring:'):] is_regex = True column = 'f:' + value r = {} for row in rows: data = rows[row] r_data = {} for key in data: if ((op == '=' and key.startswith(column)) or (op == '>=' and key >= column) or (op == '<=' and key <= column) or (op == '>' and key > column) or (op == '<' and key < column) or (is_regex and re.search(value, key))): r_data[key] = data[key] else: raise aodh.NotImplementedError( "In-memory QualifierFilter " "doesn't support the %s " "operation yet" % op) if r_data: r[row] = r_data return r class MConnectionPool(object): def __init__(self): self.conn = MConnection() def connection(self): return self.conn class MConnection(object): """HappyBase.Connection mock.""" def __init__(self): self.tables = {} def __enter__(self, *args, **kwargs): return self def __exit__(self, exc_type, exc_val, exc_tb): pass @staticmethod def open(): LOG.debug("Opening in-memory HBase connection") def create_table(self, n, families=None): families = families or {} if n in self.tables: return self.tables[n] t = MTable(n, families) self.tables[n] = t return t def delete_table(self, name, use_prefix=True): del self.tables[name] def table(self, name): return self.create_table(name) aodh-2.0.6/aodh/storage/hbase/base.py0000664000567000056710000000533213076064372020533 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import happybase from oslo_log import log from oslo_utils import netutils from six.moves.urllib import parse as urlparse from aodh.storage.hbase import inmemory as hbase_inmemory LOG = log.getLogger(__name__) class Connection(object): """Base connection class for HBase.""" _memory_instance = None def __init__(self, conf, url): """Hbase Connection Initialization.""" opts = self._parse_connection_url(url) if opts['host'] == '__test__': # This is a in-memory usage for unit tests if Connection._memory_instance is None: LOG.debug('Creating a new in-memory HBase Connection object') Connection._memory_instance = (hbase_inmemory. MConnectionPool()) self.conn_pool = Connection._memory_instance else: self.conn_pool = self._get_connection_pool(opts) @staticmethod def _get_connection_pool(conf): """Return a connection pool to the database. .. note:: The tests use a subclass to override this and return an in-memory connection pool. """ LOG.debug('connecting to HBase on %(host)s:%(port)s', {'host': conf['host'], 'port': conf['port']}) return happybase.ConnectionPool(size=100, host=conf['host'], port=conf['port'], table_prefix=conf['table_prefix']) @staticmethod def _parse_connection_url(url): """Parse connection parameters from a database url. .. note:: HBase Thrift does not support authentication and there is no database name, so we are not looking for these in the url. 
""" opts = {} result = netutils.urlsplit(url) opts['table_prefix'] = urlparse.parse_qs( result.query).get('table_prefix', [None])[0] opts['dbtype'] = result.scheme if ':' in result.netloc: opts['host'], port = result.netloc.split(':') else: opts['host'] = result.netloc port = 9090 opts['port'] = port and int(port) or 9090 return opts aodh-2.0.6/aodh/storage/impl_log.py0000664000567000056710000000363513076064372020345 0ustar jenkinsjenkins00000000000000# # Copyright 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Simple logging storage backend. """ from oslo_log import log from aodh.i18n import _LI from aodh.storage import base LOG = log.getLogger(__name__) class Connection(base.Connection): """Log the data.""" @staticmethod def upgrade(): pass @staticmethod def clear(): pass @staticmethod def get_alarms(name=None, user=None, state=None, meter=None, project=None, enabled=None, alarm_id=None, alarm_type=None, severity=None, exclude=None): """Yields a lists of alarms that match filters.""" return [] @staticmethod def create_alarm(alarm): """Create alarm.""" return alarm @staticmethod def update_alarm(alarm): """Update alarm.""" return alarm @staticmethod def delete_alarm(alarm_id): """Delete an alarm and its history data.""" @staticmethod def clear_expired_alarm_history_data(alarm_history_ttl): """Clear expired alarm history data from the backend storage system. Clearing occurs according to the time-to-live. :param alarm_history_ttl: Number of seconds to keep alarm history records for. """ LOG.info(_LI('Dropping alarm history data with TTL %d'), alarm_history_ttl) aodh-2.0.6/aodh/evaluator/0000775000567000056710000000000013076064720016515 5ustar jenkinsjenkins00000000000000aodh-2.0.6/aodh/evaluator/threshold.py0000664000567000056710000002146013076064372021071 0ustar jenkinsjenkins00000000000000# # Copyright 2013-2015 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import copy import datetime import operator import six from ceilometerclient import client as ceiloclient from oslo_log import log from oslo_utils import timeutils from aodh import evaluator from aodh.evaluator import utils from aodh.i18n import _, _LW from aodh import keystone_client LOG = log.getLogger(__name__) COMPARATORS = { 'gt': operator.gt, 'lt': operator.lt, 'ge': operator.ge, 'le': operator.le, 'eq': operator.eq, 'ne': operator.ne, } class ThresholdEvaluator(evaluator.Evaluator): # the sliding evaluation window is extended to allow # for reporting/ingestion lag look_back = 1 def __init__(self, conf): super(ThresholdEvaluator, self).__init__(conf) self._cm_client = None @property def cm_client(self): if self._cm_client is None: auth_config = self.conf.service_credentials self._cm_client = ceiloclient.get_client( version=2, session=keystone_client.get_session(self.conf), # ceiloclient adapter options region_name=auth_config.region_name, interface=auth_config.interface, ) return self._cm_client @classmethod def _bound_duration(cls, rule): """Bound the duration of the statistics query.""" now = timeutils.utcnow() # when exclusion of weak datapoints is enabled, we extend # the look-back period so as to allow a clearer sample count # trend to be established look_back = (cls.look_back if not rule.get('exclude_outliers') else rule['evaluation_periods']) window = ((rule.get('period', None) or rule['granularity']) * (rule['evaluation_periods'] + look_back)) start = now - datetime.timedelta(seconds=window) LOG.debug('query stats from %(start)s to ' '%(now)s', {'start': start, 'now': now}) return start.isoformat(), now.isoformat() @staticmethod def _sanitize(rule, statistics): """Sanitize statistics.""" LOG.debug('sanitize stats %s', statistics) if rule.get('exclude_outliers'): key = operator.attrgetter('count') mean = utils.mean(statistics, key) stddev = utils.stddev(statistics, key, mean) lower = mean - 2 * stddev upper = mean + 2 * stddev inliers, outliers = utils.anomalies(statistics, key, lower, upper) if outliers: LOG.debug('excluded weak datapoints with sample counts %s', [s.count for s in outliers]) statistics = inliers else: LOG.debug('no excluded weak datapoints') # in practice statistics are always sorted by period start, not # strictly required by the API though statistics = statistics[-rule['evaluation_periods']:] result_statistics = [getattr(stat, rule['statistic']) for stat in statistics] LOG.debug('pruned statistics to %d', len(statistics)) return result_statistics def _statistics(self, rule, start, end): """Retrieve statistics over the current window.""" after = dict(field='timestamp', op='ge', value=start) before = dict(field='timestamp', op='le', value=end) query = copy.copy(rule['query']) query.extend([before, after]) LOG.debug('stats query %s', query) try: return self.cm_client.statistics.list( meter_name=rule['meter_name'], q=query, period=rule['period']) except Exception: LOG.exception(_('alarm stats retrieval failed')) return [] @staticmethod def _reason_data(disposition, count, most_recent): """Create a reason data dictionary for this evaluator type.""" return {'type': 'threshold', 'disposition': disposition, 'count': count, 'most_recent': most_recent} @classmethod def _reason(cls, alarm, statistics, state, count): """Fabricate reason string.""" if state == evaluator.OK: disposition = 'inside' count = len(statistics) - count else: disposition = 'outside' last = statistics[-1] if statistics else None transition = alarm.state != state reason_data = 
cls._reason_data(disposition, count, last) if transition: return (_('Transition to %(state)s due to %(count)d samples' ' %(disposition)s threshold, most recent:' ' %(most_recent)s') % dict(reason_data, state=state)), reason_data return (_('Remaining as %(state)s due to %(count)d samples' ' %(disposition)s threshold, most recent: %(most_recent)s') % dict(reason_data, state=state)), reason_data def evaluate_rule(self, alarm_rule): """Evaluate alarm rule. :returns: state, trending state and statistics. """ start, end = self._bound_duration(alarm_rule) statistics = self._statistics(alarm_rule, start, end) statistics = self._sanitize(alarm_rule, statistics) sufficient = len(statistics) >= alarm_rule['evaluation_periods'] if not sufficient: return evaluator.UNKNOWN, None, statistics, len(statistics) def _compare(value): op = COMPARATORS[alarm_rule['comparison_operator']] limit = alarm_rule['threshold'] LOG.debug('comparing value %(value)s against threshold' ' %(limit)s', {'value': value, 'limit': limit}) return op(value, limit) compared = list(six.moves.map(_compare, statistics)) distilled = all(compared) unequivocal = distilled or not any(compared) number_outside = len([c for c in compared if c]) if unequivocal: state = evaluator.ALARM if distilled else evaluator.OK return state, None, statistics, number_outside else: trending_state = evaluator.ALARM if compared[-1] else evaluator.OK return None, trending_state, statistics, number_outside def _transition_alarm(self, alarm, state, trending_state, statistics, outside_count): unknown = alarm.state == evaluator.UNKNOWN continuous = alarm.repeat_actions if trending_state: if unknown or continuous: state = trending_state if unknown else alarm.state reason, reason_data = self._reason(alarm, statistics, state, outside_count) self._refresh(alarm, state, reason, reason_data) return if state == evaluator.UNKNOWN and not unknown: LOG.warning(_LW('Expecting %(expected)d datapoints but only get ' '%(actual)d') % { 'expected': alarm.rule['evaluation_periods'], 'actual': len(statistics)}) # Reason is not same as log message because we want to keep # consistent since thirdparty software may depend on old format. reason = _('%d datapoints are unknown') % alarm.rule[ 'evaluation_periods'] last = None if not statistics else statistics[-1] reason_data = self._reason_data('unknown', alarm.rule['evaluation_periods'], last) self._refresh(alarm, state, reason, reason_data) elif state and (alarm.state != state or continuous): reason, reason_data = self._reason(alarm, statistics, state, outside_count) self._refresh(alarm, state, reason, reason_data) def evaluate(self, alarm): if not self.within_time_constraint(alarm): LOG.debug('Attempted to evaluate alarm %s, but it is not ' 'within its time constraint.', alarm.alarm_id) return state, trending_state, statistics, outside_count = self.evaluate_rule( alarm.rule) self._transition_alarm(alarm, state, trending_state, statistics, outside_count) aodh-2.0.6/aodh/evaluator/event.py0000664000567000056710000002206613076064372020221 0ustar jenkinsjenkins00000000000000# # Copyright 2015 NEC Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import fnmatch import operator from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import six from aodh import evaluator from aodh.i18n import _, _LE LOG = log.getLogger(__name__) COMPARATORS = { 'gt': operator.gt, 'lt': operator.lt, 'ge': operator.ge, 'le': operator.le, 'eq': operator.eq, 'ne': operator.ne, } OPTS = [ cfg.IntOpt('event_alarm_cache_ttl', default=60, help='TTL of event alarm caches, in seconds. ' 'Set to 0 to disable caching.'), ] def _sanitize_trait_value(value, trait_type): if trait_type in (2, 'integer'): return int(value) elif trait_type in (3, 'float'): return float(value) elif trait_type in (4, 'datetime'): return timeutils.normalize_time(timeutils.parse_isotime(value)) else: return six.text_type(value) class InvalidEvent(Exception): """Error raised when the received event is missing mandatory fields.""" class Event(object): """Wrapped event object to hold converted values for this evaluator.""" TRAIT_FIELD = 0 TRAIT_TYPE = 1 TRAIT_VALUE = 2 def __init__(self, event): self.obj = event self._validate() self.id = event.get('message_id') self._parse_traits() def _validate(self): """Validate received event has mandatory parameters.""" if not self.obj: LOG.error(_LE('Received invalid event (empty or None)')) raise InvalidEvent() if not self.obj.get('event_type'): LOG.error(_LE('Failed to extract event_type from event = %s'), self.obj) raise InvalidEvent() if not self.obj.get('message_id'): LOG.error(_LE('Failed to extract message_id from event = %s'), self.obj) raise InvalidEvent() def _parse_traits(self): self.traits = {} self.project = '' for t in self.obj.get('traits', []): k = t[self.TRAIT_FIELD] v = _sanitize_trait_value(t[self.TRAIT_VALUE], t[self.TRAIT_TYPE]) self.traits[k] = v if k in ('tenant_id', 'project_id'): self.project = v def get_value(self, field): if field.startswith('traits.'): key = field.split('.', 1)[-1] return self.traits.get(key) v = self.obj for f in field.split('.'): if hasattr(v, 'get'): v = v.get(f) else: return None return v class Alarm(object): """Wrapped alarm object to hold converted values for this evaluator.""" TRAIT_TYPES = { 'none': 0, 'string': 1, 'integer': 2, 'float': 3, 'datetime': 4, } def __init__(self, alarm): self.obj = alarm self.id = alarm.alarm_id self._parse_query() def _parse_query(self): self.query = [] for q in self.obj.rule.get('query', []): if not q['field'].startswith('traits.'): self.query.append(q) continue type_num = self.TRAIT_TYPES[q.get('type') or 'string'] field = q['field'] value = _sanitize_trait_value(q.get('value'), type_num) op = COMPARATORS[q.get('op', 'eq')] self.query.append({'field': field, 'value': value, 'op': op}) def fired_and_no_repeat(self): return (not self.obj.repeat_actions and self.obj.state == evaluator.ALARM) def event_type_to_watch(self, event_type): return fnmatch.fnmatch(event_type, self.obj.rule['event_type']) class EventAlarmEvaluator(evaluator.Evaluator): def __init__(self, conf): super(EventAlarmEvaluator, self).__init__(conf) self.caches = {} def evaluate_events(self, events): """Evaluate the events by referring related alarms.""" if not isinstance(events, list): events = [events] LOG.debug('Starting event alarm evaluation: #events = %d', len(events)) for e in events: LOG.debug('Evaluating event: event = %s', e) try: event = Event(e) except InvalidEvent: LOG.debug('Aborting evaluation of the event.') continue for id, alarm in six.iteritems( 
self._get_project_alarms(event.project)): try: self._evaluate_alarm(alarm, event) except Exception: LOG.exception(_LE('Failed to evaluate alarm (id=%(a)s) ' 'triggered by event = %(e)s.'), {'a': id, 'e': e}) LOG.debug('Finished event alarm evaluation.') def _get_project_alarms(self, project): if self.conf.event_alarm_cache_ttl and project in self.caches: if timeutils.is_older_than(self.caches[project]['updated'], self.conf.event_alarm_cache_ttl): del self.caches[project] else: return self.caches[project]['alarms'] # TODO(r-mibu): Implement "changes-since" at the storage API and make # this function update only alarms changed from the last access. alarms = {a.alarm_id: Alarm(a) for a in self._storage_conn.get_alarms(enabled=True, alarm_type='event', project=project)} if self.conf.event_alarm_cache_ttl: self.caches[project] = { 'alarms': alarms, 'updated': timeutils.utcnow() } return alarms def _evaluate_alarm(self, alarm, event): """Evaluate the alarm by referring the received event. This function compares each condition of the alarm on the assumption that all conditions are combined by AND operator. When the received event met conditions defined in alarm 'event_type' and 'query', the alarm will be fired and updated to state='alarm' (alarmed). Note: by this evaluator, the alarm won't be changed to state='ok' nor state='insufficient data'. """ LOG.debug('Evaluating alarm (id=%(a)s) triggered by event ' '(message_id=%(e)s).', {'a': alarm.id, 'e': event.id}) if alarm.fired_and_no_repeat(): LOG.debug('Skip evaluation of the alarm id=%s which have already ' 'fired.', alarm.id) return if not alarm.event_type_to_watch(event.obj['event_type']): LOG.debug('Aborting evaluation of the alarm (id=%s) since ' 'event_type is not matched.', alarm.id) return def _compare(condition): v = event.get_value(condition['field']) LOG.debug('Comparing value=%(v)s against condition=%(c)s .', {'v': v, 'c': condition}) return condition['op'](v, condition['value']) for condition in alarm.query: if not _compare(condition): LOG.debug('Aborting evaluation of the alarm due to ' 'unmet condition=%s .', condition) return self._fire_alarm(alarm, event) def _fire_alarm(self, alarm, event): """Update alarm state and fire alarm via alarm notifier.""" state = evaluator.ALARM reason = (_('Event (message_id=%(message)s) hit the query of alarm ' '(id=%(alarm)s)') % {'message': event.id, 'alarm': alarm.id}) reason_data = {'type': 'event', 'event': event.obj} always_record = alarm.obj.repeat_actions self._refresh(alarm.obj, state, reason, reason_data, always_record) def _refresh(self, alarm, state, reason, reason_data, always_record): super(EventAlarmEvaluator, self)._refresh(alarm, state, reason, reason_data, always_record) project = alarm.project_id if self.conf.event_alarm_cache_ttl and project in self.caches: self.caches[project]['alarms'][alarm.alarm_id].obj.state = state # NOTE(r-mibu): This method won't be used, but we have to define here in # order to overwrite the abstract method in the super class. # TODO(r-mibu): Change the base (common) class design for evaluators. def evaluate(self, alarm): pass aodh-2.0.6/aodh/evaluator/composite.py0000664000567000056710000002102013076064372021067 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from oslo_log import log import six from stevedore import NamedExtensionManager from aodh import evaluator from aodh.i18n import _ LOG = log.getLogger(__name__) STATE_CHANGE = {evaluator.ALARM: 'outside their threshold.', evaluator.OK: 'inside their threshold.', evaluator.UNKNOWN: 'state evaluated to unknown.'} class RuleTarget(object): def __init__(self, rule, rule_evaluator, rule_name): self.rule = rule self.type = rule.get('type') self.rule_evaluator = rule_evaluator self.rule_name = rule_name self.state = None self.trending_state = None self.statistics = None self.evaluated = False def evaluate(self): # Evaluate a sub-rule of composite rule if not self.evaluated: LOG.debug('Evaluating %(type)s rule: %(rule)s', {'type': self.type, 'rule': self.rule}) self.state, self.trending_state, self.statistics, __ = \ self.rule_evaluator.evaluate_rule(self.rule) self.evaluated = True class RuleEvaluationBase(object): def __init__(self, rule_target): self.rule_target = rule_target def __str__(self): return self.rule_target.rule_name class OkEvaluation(RuleEvaluationBase): def __bool__(self): self.rule_target.evaluate() return self.rule_target.state == evaluator.OK __nonzero__ = __bool__ class AlarmEvaluation(RuleEvaluationBase): def __bool__(self): self.rule_target.evaluate() return self.rule_target.state == evaluator.ALARM __nonzero__ = __bool__ class AndOp(object): def __init__(self, rule_targets): self.rule_targets = rule_targets def __bool__(self): return all(self.rule_targets) def __str__(self): return '(' + ' and '.join(six.moves.map(str, self.rule_targets)) + ')' __nonzero__ = __bool__ class OrOp(object): def __init__(self, rule_targets): self.rule_targets = rule_targets def __bool__(self): return any(self.rule_targets) def __str__(self): return '(' + ' or '.join(six.moves.map(str, self.rule_targets)) + ')' __nonzero__ = __bool__ class CompositeEvaluator(evaluator.Evaluator): def __init__(self, conf): super(CompositeEvaluator, self).__init__(conf) self.conf = conf self._threshold_evaluators = None self.rule_targets = [] self.rule_name_prefix = 'rule' self.rule_num = 0 @property def threshold_evaluators(self): if not self._threshold_evaluators: threshold_types = ('threshold', 'gnocchi_resources_threshold', 'gnocchi_aggregation_by_metrics_threshold', 'gnocchi_aggregation_by_resources_threshold') self._threshold_evaluators = NamedExtensionManager( 'aodh.evaluator', threshold_types, invoke_on_load=True, invoke_args=(self.conf,)) return self._threshold_evaluators def _parse_composite_rule(self, alarm_rule): """Parse the composite rule. The composite rule is assembled by sub threshold rules with 'and', 'or', the form can be nested. e.g. 
the form of composite rule can be like this: { "and": [threshold_rule0, threshold_rule1, {'or': [threshold_rule2, threshold_rule3, threshold_rule4, threshold_rule5]}] } """ if (isinstance(alarm_rule, dict) and len(alarm_rule) == 1 and list(alarm_rule)[0] in ('and', 'or')): and_or_key = list(alarm_rule)[0] if and_or_key == 'and': rules = (self._parse_composite_rule(r) for r in alarm_rule['and']) rules_alarm, rules_ok = zip(*rules) return AndOp(rules_alarm), OrOp(rules_ok) else: rules = (self._parse_composite_rule(r) for r in alarm_rule['or']) rules_alarm, rules_ok = zip(*rules) return OrOp(rules_alarm), AndOp(rules_ok) else: rule_evaluator = self.threshold_evaluators[alarm_rule['type']].obj self.rule_num += 1 name = self.rule_name_prefix + str(self.rule_num) rule = RuleTarget(alarm_rule, rule_evaluator, name) self.rule_targets.append(rule) return AlarmEvaluation(rule), OkEvaluation(rule) def _reason(self, alarm, new_state, rule_target_alarm): transition = alarm.state != new_state reason_data = { 'type': 'composite', 'composition_form': str(rule_target_alarm)} root_cause_rules = {} for rule in self.rule_targets: if rule.state == new_state: root_cause_rules.update({rule.rule_name: rule.rule}) reason_data.update(causative_rules=root_cause_rules) params = {'state': new_state, 'expression': str(rule_target_alarm), 'rules': ', '.join(sorted(root_cause_rules)), 'description': STATE_CHANGE[new_state]} if transition: reason = (_('Composite rule alarm with composition form: ' '%(expression)s transition to %(state)s, due to ' 'rules: %(rules)s %(description)s') % params) else: reason = (_('Composite rule alarm with composition form: ' '%(expression)s remaining as %(state)s, due to ' 'rules: %(rules)s %(description)s') % params) return reason, reason_data def _evaluate_sufficient(self, alarm, rule_target_alarm, rule_target_ok): # Some of evaluated rules are unknown states or trending states. unknown = alarm.state == evaluator.UNKNOWN continuous = alarm.repeat_actions if unknown or continuous: for rule in self.rule_targets: if rule.trending_state: rule.state = (rule.trending_state if unknown else alarm.state) alarm_triggered = bool(rule_target_alarm) if alarm_triggered: reason, reason_data = self._reason(alarm, evaluator.ALARM, rule_target_alarm) self._refresh(alarm, evaluator.ALARM, reason, reason_data) return True ok_result = bool(rule_target_ok) if ok_result: reason, reason_data = self._reason(alarm, evaluator.OK, rule_target_alarm) self._refresh(alarm, evaluator.OK, reason, reason_data) return True return False def evaluate(self, alarm): if not self.within_time_constraint(alarm): LOG.debug('Attempted to evaluate alarm %s, but it is not ' 'within its time constraint.', alarm.alarm_id) return LOG.debug("Evaluating composite rule alarm %s ...", alarm.alarm_id) self.rule_targets = [] self.rule_num = 0 rule_target_alarm, rule_target_ok = self._parse_composite_rule( alarm.rule) sufficient = self._evaluate_sufficient(alarm, rule_target_alarm, rule_target_ok) if not sufficient: for rule in self.rule_targets: rule.evaluate() sufficient = self._evaluate_sufficient(alarm, rule_target_alarm, rule_target_ok) if not sufficient: # The following unknown situations is like these: # 1. 'unknown' and 'alarm' # 2. 
'unknown' or 'ok' reason, reason_data = self._reason(alarm, evaluator.UNKNOWN, rule_target_alarm) if alarm.state != evaluator.UNKNOWN: self._refresh(alarm, evaluator.UNKNOWN, reason, reason_data) else: LOG.debug(reason) aodh-2.0.6/aodh/evaluator/utils.py0000664000567000056710000000317513076064371020237 0ustar jenkinsjenkins00000000000000# # Copyright 2014 Red Hat, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import math def mean(s, key=lambda x: x): """Calculate the mean of a numeric list.""" count = float(len(s)) if count: return math.fsum(map(key, s)) / count return 0.0 def deltas(s, key, m=None): """Calculate the squared distances from mean for a numeric list.""" m = m or mean(s, key) return [(key(i) - m) ** 2 for i in s] def variance(s, key, m=None): """Calculate the variance of a numeric list.""" return mean(deltas(s, key, m)) def stddev(s, key, m=None): """Calculate the standard deviation of a numeric list.""" return math.sqrt(variance(s, key, m)) def outside(s, key, lower=0.0, upper=0.0): """Determine if value falls outside upper and lower bounds.""" v = key(s) return v < lower or v > upper def anomalies(s, key, lower=0.0, upper=0.0): """Separate anomalous data points from the in-liers.""" inliers = [] outliers = [] for i in s: if outside(i, key, lower, upper): outliers.append(i) else: inliers.append(i) return inliers, outliers aodh-2.0.6/aodh/evaluator/combination.py0000664000567000056710000001112013076064372021367 0ustar jenkinsjenkins00000000000000# # Copyright 2013 eNovance # # Authors: Mehdi Abaakouk # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from six import moves from aodh import evaluator from aodh.i18n import _, _LE LOG = log.getLogger(__name__) COMPARATORS = {'and': all, 'or': any} class CombinationEvaluator(evaluator.Evaluator): def _get_alarm_state(self, alarm_id): try: alarms = self._storage_conn.get_alarms(alarm_id=alarm_id) except Exception: LOG.exception(_LE('alarm %s retrieval failed'), alarm_id) return None if not alarms: LOG.error(_LE("alarm %s doesn't exists anymore"), alarm_id) return None return list(alarms)[0].state def _sufficient_states(self, alarm, states): """Check for the sufficiency of the data for evaluation. Ensure that there is sufficient data for evaluation, transitioning to unknown otherwise. 
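:param states: a list of (alarm_id, state) pairs for the underlying
    alarms referenced by the combination rule; an empty state or
    'insufficient data' counts as missing.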
""" # note(sileht): alarm can be evaluated only with # stable state of other alarm alarms_missing_states = [alarm_id for alarm_id, state in states if not state or state == evaluator.UNKNOWN] sufficient = len(alarms_missing_states) == 0 if not sufficient and alarm.rule['operator'] == 'or': # if operator is 'or' and there is one alarm, then the combinated # alarm's state should be 'alarm' sufficient = bool([alarm_id for alarm_id, state in states if state == evaluator.ALARM]) if not sufficient and alarm.state != evaluator.UNKNOWN: reason = (_('Alarms %(alarm_ids)s' ' are in unknown state') % {'alarm_ids': ",".join(alarms_missing_states)}) reason_data = self._reason_data(alarms_missing_states) self._refresh(alarm, evaluator.UNKNOWN, reason, reason_data) return sufficient @staticmethod def _reason_data(alarm_ids): """Create a reason data dictionary for this evaluator type.""" return {'type': 'combination', 'alarm_ids': alarm_ids} @classmethod def _reason(cls, alarm, state, underlying_states): """Fabricate reason string.""" transition = alarm.state != state alarms_to_report = [alarm_id for alarm_id, alarm_state in underlying_states if alarm_state == state] reason_data = cls._reason_data(alarms_to_report) if transition: return (_('Transition to %(state)s due to alarms' ' %(alarm_ids)s in state %(state)s') % {'state': state, 'alarm_ids': ",".join(alarms_to_report)}), reason_data return (_('Remaining as %(state)s due to alarms' ' %(alarm_ids)s in state %(state)s') % {'state': state, 'alarm_ids': ",".join(alarms_to_report)}), reason_data def _transition(self, alarm, underlying_states): """Transition alarm state if necessary.""" op = alarm.rule['operator'] if COMPARATORS[op](s == evaluator.ALARM for __, s in underlying_states): state = evaluator.ALARM else: state = evaluator.OK continuous = alarm.repeat_actions reason, reason_data = self._reason(alarm, state, underlying_states) if alarm.state != state or continuous: self._refresh(alarm, state, reason, reason_data) def evaluate(self, alarm): if not self.within_time_constraint(alarm): LOG.debug('Attempted to evaluate alarm %s, but it is not ' 'within its time constraint.', alarm.alarm_id) return states = zip(alarm.rule['alarm_ids'], moves.map(self._get_alarm_state, alarm.rule['alarm_ids'])) # states is consumed more than once, we need a list states = list(states) if self._sufficient_states(alarm, states): self._transition(alarm, states) aodh-2.0.6/aodh/evaluator/__init__.py0000664000567000056710000002316113076064372020634 0ustar jenkinsjenkins00000000000000# # Copyright 2013-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import datetime import json import croniter from oslo_config import cfg from oslo_log import log from oslo_service import service as os_service from oslo_utils import timeutils import pytz import six from stevedore import extension import uuid import aodh from aodh import coordination from aodh.i18n import _, _LW from aodh import keystone_client from aodh import messaging from aodh import queue from aodh import rpc from aodh import storage from aodh.storage import models LOG = log.getLogger(__name__) UNKNOWN = 'insufficient data' OK = 'ok' ALARM = 'alarm' OPTS = [ cfg.BoolOpt('record_history', default=True, deprecated_group="alarm", help='Record alarm change events.' ), ] @six.add_metaclass(abc.ABCMeta) class Evaluator(object): """Base class for alarm rule evaluator plugins.""" def __init__(self, conf): self.conf = conf if conf.ipc_protocol == 'rpc': self.notifier = rpc.RPCAlarmNotifier(self.conf) else: self.notifier = queue.AlarmNotifier(self.conf) self.storage_conn = None self._ks_client = None self._alarm_change_notifier = None @property def ks_client(self): if self._ks_client is None: self._ks_client = keystone_client.get_client(self.conf) return self._ks_client @property def _storage_conn(self): if not self.storage_conn: self.storage_conn = storage.get_connection_from_config(self.conf) return self.storage_conn def _record_change(self, alarm): if not self.conf.record_history: return type = models.AlarmChange.STATE_TRANSITION detail = json.dumps({'state': alarm.state}) user_id, project_id = self.ks_client.user_id, self.ks_client.project_id on_behalf_of = alarm.project_id now = timeutils.utcnow() payload = dict(event_id=str(uuid.uuid4()), alarm_id=alarm.alarm_id, type=type, detail=detail, user_id=user_id, project_id=project_id, on_behalf_of=on_behalf_of, timestamp=now) try: self._storage_conn.record_alarm_change(payload) except aodh.NotImplementedError: pass if not self._alarm_change_notifier: transport = messaging.get_transport(self.conf) self._alarm_change_notifier = messaging.get_notifier( transport, publisher_id="aodh.evaluator") notification = "alarm.state_transition" self._alarm_change_notifier.info({}, notification, payload) def _refresh(self, alarm, state, reason, reason_data, always_record=False): """Refresh alarm state.""" try: previous = alarm.state alarm.state = state if previous != state or always_record: LOG.info(_('alarm %(id)s transitioning to %(state)s because ' '%(reason)s') % {'id': alarm.alarm_id, 'state': state, 'reason': reason}) try: self._storage_conn.update_alarm(alarm) except storage.AlarmNotFound: LOG.warning(_LW("Skip updating this alarm's state, the" "alarm: %s has been deleted"), alarm.alarm_id) else: self._record_change(alarm) self.notifier.notify(alarm, previous, reason, reason_data) elif alarm.repeat_actions: self.notifier.notify(alarm, previous, reason, reason_data) except Exception: # retry will occur naturally on the next evaluation # cycle (unless alarm state reverts in the meantime) LOG.exception(_('alarm state update failed')) @classmethod def within_time_constraint(cls, alarm): """Check whether the alarm is within at least one of its time limits. If there are none, then the answer is yes. 
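Each constraint is expected to carry the keys used below (the values
shown here are illustrative only)::

    {'start': '0 9 * * *',        # cron expression for the window start
     'duration': 3600,            # window length, in seconds
     'timezone': 'Europe/Paris'}  # optional; UTC is used when empty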
""" if not alarm.time_constraints: return True now_utc = timeutils.utcnow().replace(tzinfo=pytz.utc) for tc in alarm.time_constraints: tz = pytz.timezone(tc['timezone']) if tc['timezone'] else None now_tz = now_utc.astimezone(tz) if tz else now_utc start_cron = croniter.croniter(tc['start'], now_tz) if cls._is_exact_match(start_cron, now_tz): return True # start_cron.cur has changed in _is_exact_match(), # croniter cannot recover properly in some corner case. start_cron = croniter.croniter(tc['start'], now_tz) latest_start = start_cron.get_prev(datetime.datetime) duration = datetime.timedelta(seconds=tc['duration']) if latest_start <= now_tz <= latest_start + duration: return True return False @staticmethod def _is_exact_match(cron, ts): """Handle edge in case when both parameters are equal. Handle edge case where if the timestamp is the same as the cron point in time to the minute, croniter returns the previous start, not the current. We can check this by first going one step back and then one step forward and check if we are at the original point in time. """ cron.get_prev() diff = (ts - cron.get_next(datetime.datetime)).total_seconds() return abs(diff) < 60 # minute precision @abc.abstractmethod def evaluate(self, alarm): """Interface definition. evaluate an alarm alarm Alarm: an instance of the Alarm """ class AlarmEvaluationService(os_service.Service): PARTITIONING_GROUP_NAME = "alarm_evaluator" EVALUATOR_EXTENSIONS_NAMESPACE = "aodh.evaluator" def __init__(self, conf): super(AlarmEvaluationService, self).__init__() self.conf = conf self.storage_conn = None self._load_evaluators() self.partition_coordinator = coordination.PartitionCoordinator(conf) @property def _storage_conn(self): if not self.storage_conn: self.storage_conn = storage.get_connection_from_config(self.conf) return self.storage_conn def _load_evaluators(self): self.evaluators = extension.ExtensionManager( namespace=self.EVALUATOR_EXTENSIONS_NAMESPACE, invoke_on_load=True, invoke_args=(self.conf,) ) def _evaluate_assigned_alarms(self): try: alarms = self._assigned_alarms() LOG.info(_('initiating evaluation cycle on %d alarms') % len(alarms)) for alarm in alarms: self._evaluate_alarm(alarm) except Exception: LOG.exception(_('alarm evaluation cycle failed')) def _evaluate_alarm(self, alarm): """Evaluate the alarms assigned to this evaluator.""" if alarm.type not in self.evaluators: LOG.debug('skipping alarm %s: type unsupported', alarm.alarm_id) return LOG.debug('evaluating alarm %s', alarm.alarm_id) try: self.evaluators[alarm.type].obj.evaluate(alarm) except Exception: LOG.exception(_('Failed to evaluate alarm %s'), alarm.alarm_id) def start(self): super(AlarmEvaluationService, self).start() self.partition_coordinator.start() self.partition_coordinator.join_group(self.PARTITIONING_GROUP_NAME) # allow time for coordination if necessary delay_start = self.partition_coordinator.is_active() if self.evaluators: interval = self.conf.evaluation_interval self.tg.add_timer( interval, self._evaluate_assigned_alarms, initial_delay=interval if delay_start else None) if self.partition_coordinator.is_active(): heartbeat_interval = min(self.conf.coordination.heartbeat, self.conf.evaluation_interval / 4) self.tg.add_timer(heartbeat_interval, self.partition_coordinator.heartbeat) # Add a dummy thread to have wait() working self.tg.add_timer(604800, lambda: None) def _assigned_alarms(self): # NOTE(r-mibu): The 'event' type alarms will be evaluated by the # event-driven alarm evaluator, so this periodical evaluator skips # those alarms. 
all_alarms = self._storage_conn.get_alarms(enabled=True, exclude=dict(type='event')) all_alarms = list(all_alarms) all_alarm_ids = [a.alarm_id for a in all_alarms] selected = self.partition_coordinator.extract_my_subset( self.PARTITIONING_GROUP_NAME, all_alarm_ids) return list(filter(lambda a: a.alarm_id in selected, all_alarms)) aodh-2.0.6/aodh/evaluator/gnocchi.py0000664000567000056710000001115013076064372020502 0ustar jenkinsjenkins00000000000000# # Copyright 2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from gnocchiclient import client from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from aodh.evaluator import threshold from aodh.i18n import _ from aodh import keystone_client LOG = log.getLogger(__name__) OPTS = [ cfg.StrOpt('gnocchi_url', deprecated_group="alarm", deprecated_for_removal=True, help='URL to Gnocchi. default: autodetection'), ] class GnocchiBase(threshold.ThresholdEvaluator): def __init__(self, conf): super(GnocchiBase, self).__init__(conf) self._gnocchi_client = client.Client( '1', keystone_client.get_session(conf), interface=conf.service_credentials.interface, region_name=conf.service_credentials.region_name, endpoint_override=conf.gnocchi_url) @staticmethod def _sanitize(rule, statistics): """Return the datapoints that correspond to the alarm granularity""" # TODO(sileht): if there's no direct match, but there is an archive # policy with granularity that's an even divisor or the period, # we could potentially do a mean-of-means (or max-of-maxes or whatever, # but not a stddev-of-stddevs). # TODO(sileht): support alarm['exclude_outliers'] LOG.debug('sanitize stats %s', statistics) statistics = [stats[2] for stats in statistics if stats[1] == rule['granularity']] statistics = statistics[-rule['evaluation_periods']:] LOG.debug('pruned statistics to %d', len(statistics)) return statistics class GnocchiResourceThresholdEvaluator(GnocchiBase): def _statistics(self, rule, start, end): try: return self._gnocchi_client.metric.get_measures( metric=rule['metric'], start=start, stop=end, resource_id=rule['resource_id'], aggregation=rule['aggregation_method']) except Exception: LOG.exception(_('alarm stats retrieval failed')) return [] class GnocchiAggregationMetricsThresholdEvaluator(GnocchiBase): def _statistics(self, rule, start, end): try: # FIXME(sileht): In case of a heat autoscaling stack decide to # delete an instance, the gnocchi metrics associated to this # instance will be no more updated and when the alarm will ask # for the aggregation, gnocchi will raise a 'No overlap' # exception. # So temporary set 'needed_overlap' to 0 to disable the # gnocchi checks about missing points. 
For more detail see: # https://bugs.launchpad.net/gnocchi/+bug/1479429 return self._gnocchi_client.metric.aggregation( metrics=rule['metrics'], start=start, stop=end, aggregation=rule['aggregation_method'], needed_overlap=0) except Exception: LOG.exception(_('alarm stats retrieval failed')) return [] class GnocchiAggregationResourcesThresholdEvaluator(GnocchiBase): def _statistics(self, rule, start, end): # FIXME(sileht): In case of a heat autoscaling stack decide to # delete an instance, the gnocchi metrics associated to this # instance will be no more updated and when the alarm will ask # for the aggregation, gnocchi will raise a 'No overlap' # exception. # So temporary set 'needed_overlap' to 0 to disable the # gnocchi checks about missing points. For more detail see: # https://bugs.launchpad.net/gnocchi/+bug/1479429 try: return self._gnocchi_client.metric.aggregation( metrics=rule['metric'], query=jsonutils.loads(rule['query']), resource_type=rule["resource_type"], start=start, stop=end, aggregation=rule['aggregation_method'], needed_overlap=0, ) except Exception: LOG.exception(_('alarm stats retrieval failed')) return [] aodh-2.0.6/aodh/messaging.py0000664000567000056710000000550013076064372017045 0ustar jenkinsjenkins00000000000000# -*- coding: utf-8 -*- # Copyright 2013-2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
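# Illustrative usage sketch, added for context; the helper below is not used
# by aodh itself. It shows how this module's helpers are combined elsewhere
# in the tree (e.g. by aodh.evaluator) to emit an alarm state-transition
# notification. The payload fields and the alarm id are examples only, and a
# loaded aodh configuration object is assumed.


def _example_emit_state_transition(conf, alarm_id):
    transport = get_transport(conf)
    notifier = get_notifier(transport, publisher_id="aodh.evaluator")
    notifier.info({}, "alarm.state_transition",
                  {"alarm_id": alarm_id, "state": "alarm"})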
import oslo_messaging from oslo_messaging import serializer as oslo_serializer DEFAULT_URL = "__default__" TRANSPORTS = {} _SERIALIZER = oslo_serializer.JsonPayloadSerializer() def setup(): oslo_messaging.set_transport_defaults('aodh') def get_transport(conf, url=None, optional=False, cache=True): """Initialise the oslo_messaging layer.""" global TRANSPORTS, DEFAULT_URL cache_key = url or DEFAULT_URL transport = TRANSPORTS.get(cache_key) if not transport or not cache: try: transport = oslo_messaging.get_transport(conf, url) except (oslo_messaging.InvalidTransportURL, oslo_messaging.DriverLoadFailure): if not optional or url: # NOTE(sileht): oslo_messaging is configured but unloadable # so reraise the exception raise return None else: if cache: TRANSPORTS[cache_key] = transport return transport def get_rpc_server(conf, transport, topic, endpoint): """Return a configured oslo_messaging rpc server.""" target = oslo_messaging.Target(server=conf.host, topic=topic) return oslo_messaging.get_rpc_server(transport, target, [endpoint], executor='threading', serializer=_SERIALIZER) def get_rpc_client(transport, retry=None, **kwargs): """Return a configured oslo_messaging RPCClient.""" target = oslo_messaging.Target(**kwargs) return oslo_messaging.RPCClient(transport, target, serializer=_SERIALIZER, retry=retry) def get_notification_listener(transport, targets, endpoints, allow_requeue=False): """Return a configured oslo_messaging notification listener.""" return oslo_messaging.get_notification_listener( transport, targets, endpoints, executor='threading', allow_requeue=allow_requeue) def get_notifier(transport, publisher_id): """Return a configured oslo_messaging notifier.""" notifier = oslo_messaging.Notifier(transport, serializer=_SERIALIZER) return notifier.prepare(publisher_id=publisher_id) aodh-2.0.6/aodh/queue.py0000664000567000056710000000411713076064372016217 0ustar jenkinsjenkins00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
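# Illustrative note, added for clarity (not part of the original module):
# AlarmNotifier.notify() below publishes an 'alarm.update' sample whose
# payload has the following shape; the values shown are examples only.
#
#     {'actions': ['log://'],
#      'alarm_id': '<alarm uuid>',
#      'alarm_name': 'cpu_high',
#      'severity': 'low',
#      'previous': 'ok',
#      'current': 'alarm',
#      'reason': 'Transition to alarm due to ...',
#      'reason_data': {'type': 'threshold', 'disposition': 'outside'}}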
from oslo_config import cfg from oslo_log import log import oslo_messaging import six from aodh import messaging from aodh.storage import models OPTS = [ cfg.StrOpt('notifier_topic', default='alarming', help='The topic that aodh uses for alarm notifier ' 'messages.'), ] LOG = log.getLogger(__name__) class AlarmNotifier(object): def __init__(self, conf): self.notifier = oslo_messaging.Notifier( messaging.get_transport(conf), driver='messagingv2', publisher_id="alarming.evaluator", topic=conf.notifier_topic) def notify(self, alarm, previous, reason, reason_data): actions = getattr(alarm, models.Alarm.ALARM_ACTIONS_MAP[alarm.state]) if not actions: LOG.debug('alarm %(alarm_id)s has no action configured ' 'for state transition from %(previous)s to ' 'state %(state)s, skipping the notification.', {'alarm_id': alarm.alarm_id, 'previous': previous, 'state': alarm.state}) return payload = {'actions': actions, 'alarm_id': alarm.alarm_id, 'alarm_name': alarm.name, 'severity': alarm.severity, 'previous': previous, 'current': alarm.state, 'reason': six.text_type(reason), 'reason_data': reason_data} self.notifier.sample({}, 'alarm.update', payload) aodh-2.0.6/aodh/keystone_client.py0000664000567000056710000001643013076064372020273 0ustar jenkinsjenkins00000000000000# # Copyright 2015 eNovance # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from keystoneauth1 import discover as ka_discover from keystoneauth1 import exceptions as ka_exception from keystoneauth1 import identity as ka_identity from keystoneauth1.identity.generic import password from keystoneauth1 import loading as ka_loading from keystoneclient import session from keystoneclient.v3 import client as ks_client_v3 from oslo_config import cfg from oslo_log import log LOG = log.getLogger(__name__) CFG_GROUP = "service_credentials" def get_session(conf): """Get an aodh service credentials auth session.""" auth_plugin = ka_loading.load_auth_from_conf_options(conf, CFG_GROUP) return ka_loading.load_session_from_conf_options( conf, CFG_GROUP, auth=auth_plugin ) def get_client(conf): """Return a client for keystone v3 endpoint.""" sess = get_session(conf) return ks_client_v3.Client(session=sess) def get_trusted_client(conf, trust_id): # Ideally we would use load_session_from_conf_options, but we can't do that # *and* specify a trust, so let's create the object manually. 
if conf[CFG_GROUP].auth_type == "password-aodh-legacy": auth_url = conf[CFG_GROUP].os_auth_url try: auth_url_noneversion = auth_url.replace('/v2.0', '/') discover = ka_discover.Discover(auth_url=auth_url_noneversion) v3_auth_url = discover.url_for('3.0') if v3_auth_url: auth_url = v3_auth_url else: auth_url = auth_url except Exception: auth_url = auth_url.replace('/v2.0', '/v3') auth_plugin = password.Password( username=conf[CFG_GROUP].os_username, password=conf[CFG_GROUP].os_password, auth_url=auth_url, user_domain_id='default', trust_id=trust_id) else: auth_plugin = password.Password( username=conf[CFG_GROUP].username, password=conf[CFG_GROUP].password, auth_url=conf[CFG_GROUP].auth_url, user_domain_id=conf[CFG_GROUP].user_domain_id, trust_id=trust_id) sess = session.Session(auth=auth_plugin) return ks_client_v3.Client(session=sess) def get_auth_token(client): return client.session.auth.get_access(client.session).auth_token def get_client_on_behalf_user(auth_plugin): """Return a client for keystone v3 endpoint.""" sess = session.Session(auth=auth_plugin) return ks_client_v3.Client(session=sess) def create_trust_id(conf, trustor_user_id, trustor_project_id, roles, auth_plugin): """Create a new trust using the aodh service user.""" admin_client = get_client(conf) trustee_user_id = admin_client.session.get_user_id() client = get_client_on_behalf_user(auth_plugin) trust = client.trusts.create(trustor_user=trustor_user_id, trustee_user=trustee_user_id, project=trustor_project_id, impersonation=True, role_names=roles) return trust.id def delete_trust_id(trust_id, auth_plugin): """Delete a trust previously setup for the aodh user.""" client = get_client_on_behalf_user(auth_plugin) try: client.trusts.delete(trust_id) except ka_exception.NotFound: pass OPTS = [ cfg.StrOpt('region-name', default=os.environ.get('OS_REGION_NAME'), deprecated_name="os-region-name", help='Region name to use for OpenStack service endpoints.'), cfg.StrOpt('interface', default=os.environ.get( 'OS_INTERFACE', os.environ.get('OS_ENDPOINT_TYPE', 'public')), deprecated_name="os-endpoint-type", choices=('public', 'internal', 'admin', 'auth', 'publicURL', 'internalURL', 'adminURL'), help='Type of endpoint in Identity service catalog to use for ' 'communication with OpenStack services.'), ] def register_keystoneauth_opts(conf): ka_loading.register_auth_conf_options(conf, CFG_GROUP) ka_loading.register_session_conf_options( conf, CFG_GROUP, deprecated_opts={'cacert': [ cfg.DeprecatedOpt('os-cacert', group=CFG_GROUP), cfg.DeprecatedOpt('os-cacert', group="DEFAULT")] }) conf.set_default("auth_type", default="password-aodh-legacy", group=CFG_GROUP) def setup_keystoneauth(conf): if conf[CFG_GROUP].auth_type == "password-aodh-legacy": LOG.warning("Value 'password-aodh-legacy' for '[%s]/auth_type' " "is deprecated. And will be removed in Aodh 3.0. 
" "Use 'password' instead.", CFG_GROUP) ka_loading.load_auth_from_conf_options(conf, CFG_GROUP) class LegacyAodhKeystoneLoader(ka_loading.BaseLoader): @property def plugin_class(self): return ka_identity.V2Password def get_options(self): options = super(LegacyAodhKeystoneLoader, self).get_options() options.extend([ ka_loading.Opt( 'os-username', default=os.environ.get('OS_USERNAME', 'aodh'), help='User name to use for OpenStack service access.'), ka_loading.Opt( 'os-password', secret=True, default=os.environ.get('OS_PASSWORD', 'admin'), help='Password to use for OpenStack service access.'), ka_loading.Opt( 'os-tenant-id', default=os.environ.get('OS_TENANT_ID', ''), help='Tenant ID to use for OpenStack service access.'), ka_loading.Opt( 'os-tenant-name', default=os.environ.get('OS_TENANT_NAME', 'admin'), help='Tenant name to use for OpenStack service access.'), ka_loading.Opt( 'os-auth-url', default=os.environ.get('OS_AUTH_URL', 'http://localhost:5000/v2.0'), help='Auth URL to use for OpenStack service access.'), ]) return options def load_from_options(self, **kwargs): options_map = { 'os_auth_url': 'auth_url', 'os_username': 'username', 'os_password': 'password', 'os_tenant_name': 'tenant_name', 'os_tenant_id': 'tenant_id', } identity_kwargs = dict((options_map[o.dest], kwargs.get(o.dest) or o.default) for o in self.get_options() if o.dest in options_map) return self.plugin_class(**identity_kwargs) aodh-2.0.6/AUTHORS0000664000567000056710000002334713076064717014667 0ustar jenkinsjenkins00000000000000Abhishek Chanda Abhishek Lekshmanan Abhishek Lekshmanan Adelina Tuvenie Ajaya Agrawal Akhil Hingane Ala Rezmerita Alessandro Pilotti Alexei Kornienko Ana Malagon Andreas Jaeger Andreas Jaeger Andrew Hutchings Andrew Melton Angus Lees Angus Salkeld Ann Kamyshnikova Artur Svechnikov Balazs Gibizer Bartosz Górski Ben Nemec Ben Nemec Boris Pavlovic Brad Pokorny Brant Knudson Brian Cline Brooklyn Chen Can ZHANG Cedric Soulas Chad Lung ChangBo Guo(gcb) Chaozhe.Chen ChenZheng Chinmaya Bharadwaj Chmouel Boudjnah Chris Dent Chris Dent Christian Berendt Christian Martinez Christian Schwede Chuck Short Clark Boylan Claudiu Belu Cyril Roelandt Damian Van Vuuren Dan Florea Dan Prince Darren Birkett Davanum Srinivas David Peraza David Wahlstrom Dazhao Debo~ Dutta Dina Belova Dirk Mueller Divya Doug Hellmann Edwin Zhai Emilien Macchi Endre Karlson Eoghan Glynn Eoghan Glynn Eric Brown Fabio Giannetti Fei Long Wang Feng Xi Yan Fengqian Gao Flavio Percoco François Charlier François Rossigneux Frederic FAURE Gangyi Luo Gauvain Pocentek Gerard Garcia Gordon Chung Graham Binns Guangyu Suo Hang Liu Haomeng, Wang Harri Hämäläinen Hisashi Osanai Igor Degtiarov Ihar Hrachyshka Ildiko Vancsa Ilya Sviridov Ilya Tyaptin Ionuț Arțăriși Jake Liu James E. Blair Jason Myers Jason Zhang Jay Lau Jay Pipes Jeremy Stanley Jim Rollenhagen Joanna H. Huang Joe Gordon Joe H. Rahme John H. Tran John Herndon JordanP Julien Danjou Justin SB KIYOHIRO ADACHI Kamil Rykowski Keith Byrne Ken Pepple Ken'ichi Ohmichi Kennan Kennan Kishore Juigil Koert van der Veer Komei Shimamura Ladislav Smola Lan Qi song Lena Novokshonova Lianhao Lu LinuxJedi LiuNanke LiuSheng Luis A. 
Garcia Maho Koshiya Mark McClain Mark McLoughlin Martin Geisler Martin Kletzander Mathew Odden Mathieu Gagné Matt Riedemann Mehdi Abaakouk Mehdi Abaakouk Michael Krotscheck Michael Still Michał Jastrzębski Mike Spreitzer Monsyne Dragon Monty Taylor Nadya Privalova Nadya Shakhat Nejc Saje Nick Barcet Nicolas Barcet (nijaba) Noorul Islam K M Octavian Ciuhandu Paul Belanger Peter Portante Phil Neal Piyush Masrani Pradeep Kilambi Pradeep Kilambi Pradeep Kumar Singh Pradyumna Sampath Pádraig Brady Qiaowei Ren Rafael Rivero Rich Bowen Rikimaru Honjo Rob Raymond Robert Mizielski Rohit Jaiswal Romain Soufflet Roman Bogorodskiy Rosario Di Somma Ruslan Aliev Russell Bryant Ryan Petrello Ryota MIBU Saba Ahmed Sam Morrison Samta Samuel Merritt Sandy Walsh Sanja Nosan Sascha Peilicke Sean Dague Sergey Lukjanov Sergey Vilgelm Shane Wang Shengjie Min Shilla Saebi Shuangtai Tian Shuquan Huang Simona Iuliana Toader Sofer Athlan-Guyot Srinivas Sakhamuri Stas Maksimov Stephen Balukoff Stephen Gran Steve Lewis Steve Martinelli Steven Berler Surya Prabhakar Svetlana Shturm Swami Reddy Swann Croiset Swapnil Kulkarni (coolsvap) Sylvain Afchain Tatsuro Makita Terri Yu Thierry Carrez Thomas Bechtold Thomas Herve Thomas Herve Thomas Maddox Tong Li Ubuntu Victor Stinner Victor Stinner Vitalii Lebedynskyi Vitaly Gridnev Vladislav Kuzmin Wu Wenxiang Yaguang Tang Yanyan Hu Yassine Lamgarchal Yathiraj Udupi You Yamagata Yunhong, Jiang Zhi Kun Liu Zhi Yan Liu ZhiQiang Fan Zhongyue Luo annegentle ccrouch eNovance emilienm florent fujioka yuuichi gengjh gord chung guillaume pernot heha joyce keliang kiwik-chenrui leizhang lijian liuqing liusheng lizheming lqslan lrqrun ls1175 lvdongbing lzhijun mizeng nellysmitt replay sanuptpm sh.huang shengjie min srsakhamuri tanlin terriyu vagrant venkatamahesh vivek.nandavanam vivek.nandavanam xingzhou yanheven zhang-jinnan zhangguoqing zjingbj aodh-2.0.6/babel.cfg0000664000567000056710000000002113076064371015321 0ustar jenkinsjenkins00000000000000[python: **.py] aodh-2.0.6/CONTRIBUTING.rst0000664000567000056710000000105513076064371016244 0ustar jenkinsjenkins00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: http://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. 
Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/aodh aodh-2.0.6/ChangeLog0000664000567000056710000031531113076064717015364 0ustar jenkinsjenkins00000000000000CHANGES ======= 2.0.6 ----- * read data from stdout instead of stderr 2.0.5 ----- * Restore trust workaround for legacy auth * Limit Happybase to < 1.0.0 2.0.4 ----- * trust: add missing project for v2 auth * Handles trust with auth-type password-aodh-legacy * Fix trust notifier 2.0.3 ----- * Don't notify alarm on each refresh * sqlalchemy: allow to upgrade schema from Ceilometer Liberty * gnocchi: always set needed_overlap for aggregation 2.0.2 ----- * Correct concurrency of gabbi tests for gabbi 1.22.0 * Catch DriverLoadFailure for get_transport optional * Add a tool for migrating alarms data from NoSQL to SQL 2.0.1 ----- * Fix and improve the partition coordinator * Update .gitreview for stable/mitaka 2.0.0 ----- * cleanup core list * Use assertIn and assertNotIn for test * register the config generator default hook with the right name * Replace deprecated LOG.warn with LOG.warning * Properly retrieve keystone user from admin client * Fixed tempest error due to upstream change * Record all the fired alarm state for event-alarm * tempest: migrate api tests from tempest tree * add missing mitaka-3 release notes * A little typo of doc * Moved CORS middleware configuration into oslo-config-generator 2.0.0.0b3 --------- * Add composite alarm usage description * Remove unused pngmath Sphinx extension * Fix py34 error of indexing 'dict_keys' object * Add releasenote for composite alarm feature * Change the SERVICE_TENANT_NAME to SERVICE_PROJECT_NAME * Fix tempest test path * Add composite rule alarm API support * Add composite rule alarm evaluator * Remove ceilometer-alarm-* related content of installation * Clean etc directory * Install configuration files by default * KEYSTONE_CATALOG_BACKEND is deprecated * Added CORS support to Aodh * devstack: Fix Keystone v3 configuration typo * Fix alarm reason * Clean config in source code * tempest: add aodh tempest plugin * gabbi's own paste.ini file * Log deprecation message if users use nosql backend * devstack: use password with version discovery * devstack: support publicURL retrieval in both keystone v2/v3 format * Load zaqar client outside init * Update alarm history only if change in alarm property * functional tests: fix publicURL retrieval * threshold: fix statistics empty case * tempest: migrate codes from tempest tree 2.0.0.0b2 --------- * Zaqar notifier for alarms * tox: change default target from MongoDB to MySQL * tests: replace bash scripts with overtest * Imported Translations from Zanata * add release notes for mitaka-2 * Refactor Gnocchi and threshold evaluators * gnocchi: use gnocchiclient instead of requests * Use keystoneauth1 instead of manual setup * Replace deprecated library function os.popen() with subprocess * Use assertTrue/False instead of assertEqual(T/F) * Test: make enforce_type=True in CONF.set_override * devstack: add support for Gnocchi * Replace LOG.warn with LOG.warning * Trivial: Remove vim header from source files * Trival: Remove unused logging import * Fix an minor error in test_hbase_table_utils.py * Don't need a metaclass for AlarmEvaluationService * Use extras for dependency installation * Support newer versions of MySQL * rbac: add some backport compat tests * Fix rbac system * MAINTAINERS: remove outdated data * Replace stackforge with openstack * messaging: remove most oslo.context usage 2.0.0.0b1 --------- * add initial 
release notes * Put py34 first in the env order of tox * Update policy.json.sample with correct values * deprecate timeutils.total_seconds() * clean up integration test urls * initialize ceilometerclient when we use it * fix some test cases wrongly skipped for mysql backend * support queue based communication between evaluator and notifier * remove unnecessary mock for rpc server start * Move the content of ReleaseNotes to README.rst * devstack: fix HBase functional tests * don't pass aodh options to oslo.db engine facade * gnocchi: only evaluate the required eval_periods * Fix combination alarms * Fixing evaluation of gnocchi aggregation-by-metric * add reno for release notes management * Revert "Revert "Use oslo_config PortOpt support"" * Do not use oslo.messaging 2.8.0 * utils: move code where it's actually used and remove * hbase: add functional testing * tests: remove testscenarios usage * Remove eventlet usage * remove default=None for config options * Do not use system config file for test * devstack: install PostgreSQL devel tool for psycopg2 * Move evaluator tests into the unit folder * Revert "Use oslo_config PortOpt support" * Use oslo_config PortOpt support * Add deprecated group for gnocchi_url * Fix indent of code blocks in Devstack plugin README file * Imported Translations from Zanata * devstack: Fix some comments * remove unused configuration options * devstack/plugin.sh: fix typo * monkeypatch thread for oslo.messaging tests * Remove dependency on sphinxcontrib-docbookrestapi * Using oslo-config-generator to instead of generate-config-file.sh * Added README.rst and corrected the rally repository * proposal to add Ryota Mibu to Aodh core * Fix the gabbi target in tox.ini to use correct path 1.1.0 ----- * Avoid oslo.messaging 2.6.0 and 2.6.1 * update ceilometerclient requirement * re-organise tests * Imported Translations from Zanata * Cleanup of Translations * Remove unused file * Add test to cover history rule change * Change ignore-errors to ignore_errors * tox: Allow to pass some OS_* variables * Imported Translations from Zanata * gnocchi: Fix typo for needed_overlap * Cleanup keystonemiddleware configuration * event-alarm: add unit tests for various trait types * event-alarm: add alarm wrapper class * event-alarm: add event wrapper class * Refactor event-alarm caching * event-alarm: fix unit tests to check stored alrams * event-alarm: fix the order of alarms in unit test * event-alarm: delete debug message to show # of alarms 1.0.0 ----- * remove db2 nosql driver * storage: remove unused classes * storage: remove unused CLI option * tests: use requests rather than httplib2 * Remove unused tests requirements * percent_of_overlap=0 to validate gnocchi alarm * Adding liusheng to MAINTAINERS * Fix the aodh api port * Use new location of subunit2html * Add storage documentation * Fix args for get_notification_listener() * Create conf directory during devstack install phase * event-alarm: devstack plugin support * Update tests to reflect WSME 0.8.0 changes * Make event-alarm evaluator caching alarms * Add listener service for event alarm evaluation * Add evaluator for event alarm * doc: Fix the devstack configuration * Revert "Use generic keystone uri in devstack ..." 
* Imported Translations from Transifex * Exclude event type from targets of alarm evaluator * tox: generate sample config file on default target * Refactor api tests (_update_alarm) * Storage: add 'exclude' constraint to get_alarms() * Use generic keystone uri in devstack config * Avoid translating debug log * Use the Serializer from oslo.messaging * Fixes querying alarm history with severity field * Remove the unused cpu_count utils method * api: move API options to their own api group * storage: remove mongodb_replica_set option * service: stop supporting deprecated group for auth option * storage: remove unused option db2nosql_resource_id_maxlen * Stop registering oslo.messaging option * Move import to local to resolve circular dependency failure * Refactor api tests for alarm history * Move ceilometerclient mock to evaluator/base * Correct database functional tests * Correct thread handling in TranslationHook * storage: re-add and deprecate alarm_connection * Fix TestEvaluatorBase.prepare_alarms() * Make ConnectionRetryTest more reliable * storage: remove deprecated database_connection * Use storage scenario test base to test migration * devstack: use $API_WORKERS to set the number of WSGI workers in Apache * Add 'event' type and 'event_rule' to alarm API * Refactor alarm scenario tests (RuleCombination) * gnocchi: percent_of_overlap=0 for agg. alarms * Drop downgrade field in alembic script.py.mako * Imported Translations from Transifex * Refactor alarm scenario tests (RuleGnocchi) * Add alembic support for aodh * Use mocked object to test log message * storage: only retry connection, not driver finding * Stop using global conf object * gnocchi: stop using global conf object for Gnocchi evaluator * api: fix alarm group declaration * mongodb: stop relying on global conf object in utils * mongodb: replace custom retry code by retrying * evaluator: remove global conf usage from threshold evaluator * rpc: remove global conf usage from notifier * api: remove global conf and local pecan config * api: remove force_canonical option * tests.api: remove unused argument/config option * api: stop using a global Enforcer object * api.hooks: stop using global conf object * Port remaining tests to Python 3 * Keep alarm other attrs constantly after evaluating an alarm * tests: ensure gabbi live test fail * api: fix alarm deletion and update * functionnal: fix gating * Imported Translations from Transifex * mongodb: stop using global config object * tests.db: simplify connection handling * storage: always use get_connection_from_config() * Add keystone V3 support for service credentials * Delete its corresponding history data when deleting an alarm * Avoid getting alarm change notifier repeatedly * Use user_id/project_id from service_credentials in alarm_change * Refactor alarm scenario tests (RuleThreshold) * Fix the service entry of evaluator and notifier * Use stevedore directive to document plugins * Add basic gate functional testing jobs for aodh * notifier: stop using global conf object * tests: use config fixture in evaluator tests * coordination: stop using global conf object * storage: pass conf rather at __init__ than using a global one * evaluator: stop using global conf in evaluator service * evaluator: stop using global conf in Evaluator * notifier: stop relying on global conf object * api: stop using cfg.CONF and use request local conf * keystone_client: stop using cfg.CONF * Move service classes to their correct subdir * api: use oslo.config to validate data for worker * rpc: stop using 
global conf object in some functions * tests: remove unused fake class * Switch to oslo.utils.fileutils * Move Gnocchi options out of the "alarms" group * Remove aodh/tests/alarm, move last test out * evaluator: move to top-level * notifier: move from alarm to top-level * Close and dispose test database setup connections * Remove remnants of ceilometer from mongodb data file * Make py27 run tests on all backends by default * Imported Translations from Transifex * Move aodh.alarm.rpc to aodh.rpc * Move alarm.service to service * Allow aodh directly acessing to its storage * Refactor alarm scenario tests (TestAlarmsHistory) * trust: remove useless conf imports * api: Add location alarm creation * Add devstack plugin * Use the right sqla type for JSONEncodedDict * Refactor alarm scenario tests (TestAlarmsBase) * Imported Translations from Transifex * Make GnocchiThreshold evaluator derived from ThresholdEvaluator * Tolerate alarm actions set to None * Optionally create trust for alarm actions * Imported Translations from Transifex * doc: use pbr autodoc feature to build api doc * Remove code related to metadata/metaquery * messaging: remove unused cleanup function * impl_log: make methods static * Remove useless migration module * Minor changes for evaluator service * Update the requirements * notifier: tests stop method * api: remove v1 handling * api: remove unused extra_hooks * Move 'alarm_connection' to 'connection' * Move aodh.alarm.storage to aodh.storage * Replaces methods deprecated in pymongo3.0 * Fix options registeration in tests * Change the default api server port * Initial translation import * Rename to aodh * Remove locale * Remove code unrelated to alarming * remove unused notifier * Add support for posting samples to notification-agent via API * Stop dropping deprecated tables while upgrade in mongodb and db2 * Add handler of sample creation notification * Remove the unused get_targets method of plugin base * add oslo.service options * Restricts pipeline to have unique source names * drop use of oslo.db private attribute * Fix oslo.service configuration options building * Add fileutils to openstack-common.conf * Remove unnecessary executable permission * Switch to oslo.service * Remove unnecessary wrapping of transformer ExtentionManager * Port test_complex_query to Python 3 * Fix expected error message on Python 3 * Fix usage of iterator/list on Python 3 * Replaces ensure_index for create_index * pip has its own download cache by default * For sake of future python3 encode FakeMemcache hashes * Make acl_scenarios tests' keystonemiddleware cache work flexibly * Update version for Liberty * Gnocchi Dispatcher support in Ceilometer * Updated from global requirements * Fix alarm rest notifier logging to include severity * Remove useless execute bit on rst file * Fix unicode/bytes issues in API v2 tests * Fix script name in tox.ini for Elasticsearch * Fix the meter unit types to be consistent * tests: use policy_file in group oslo_policy * Fix publisher test_udp on Python 3 * Fix Ceph object store tests on Python 3 * Port IPMI to Python 3 * Port middleware to Python 3 * [elasticsearch] default trait type to string * Updated from global requirements * Lower down the range for columns which are being used as uuid * Sync with latest oslo-incubator * Fix testing of agent manager with tooz * Remove deprecated Swift middleware * add DNS events * Handle database failures on api startup * Fix more tests on Python 3 * Remove old oslo.messaging aliases * Remove useless versioninfo and 
clean ceilometer.conf git exclusion * Register oslo_log options before using them * Add running functional scripts for defined backend * Remove snapshot.update events as they are not sent * WSME version >=0.7 correctly returns a 405 * TraitText value restricted to max length 255 * Cause gabbi to skip on no storage sooner * Updated from global requirements * Move eventlet using commands into own directory * adjust alarm post ut code to adapt to upstream wsme * Disable rgw pollster when aws module not found * Fixes DiskInfoPollster AttributeError exception * remove useless log message * use oslo.log instead of oslo-incubator code * Port test_inspector to Python 3 * Fix usage of dictionary methods on Python 3 * Imported Translations from Transifex * Add oslo.vmware to Python 3 test dependencies * Remove iso8601 dependency * Enable test_swift_middleware on Python 3 * Enable more tests on Python 3 * Skip hbase tests on Python 3 * Clear useless exclude from flake8 ignore in tox * Remove pagination code * Stop importing print_function * Remove useless release script in tools * Remove useless dependency on posix_ipc * Remove exceute bit on HTTP dispatcher * Remove oslo.messaging compat from Havana * Fixing event types pattern for Role Noti. handler * Mask database.event_connection details in logs * Switch from MySQL-python to PyMySQL * Python 3: replace long with int * Python 3: Replace unicode with six.text_type * Python 3: generalize the usage of the six module * Update Python 3 requirements * Python 3: set __bool__() method on Namespace * Python 3: encode to UTF-8 when needed * Python 3: sort tables by their full name * Python 3: replace sys.maxint with sys.maxsize * Initial commit for functional tests * Update a test to properly anticipate HTTP 405 for RestController * proposal to add Chris Dent to Ceilometer core * rebuild event model only for database writes * cleanup problem events logic in event db storage * fix incorrect docstring for dispatcher * Imported Translations from Transifex * api: record severity change in alarm history * VMware: verify vCenter server certificate * Add hardware memory buffer and cache metrics * Make interval optional in pipeline * Improve ceilometer-api install documentation * empty non-string values are returned as string traits * Trait_* models have incorrect type for key * small change to development.rst file * Drop use of 'oslo' namespace package * [unittests] Increase agent module unittests coverage * stop mocking os.path in test_setup_events_default_config * Remove py33 tox target * made change to mod_wsgi.rst file * ensure collections created on upgrade * Fix raise error when run "tox -egenconfig" * Updated from global requirements * Fix None TypeError in neutron process notifications * Have eventlet monkeypatch the time module * Have eventlet monkeypatch the time module * Add the function of deleting alarm history * Updated from global requirements * Fix valueerror when ceilometer-api start * Override gnocchi_url configuration in test * Move ceilometer/cli.py to ceilometer/cmd/sample.py * Fix valueerror when ceilometer-api start * remove deprecated partitioned alarm service * use message id to generate hbase unique key * gnocchi: fix typo in the aggregation endpoint * Release Import of Translations from Transifex * Fix Copyright date in docs * Replace 'metrics' with 'meters' in option and doc * use message id to generate hbase unique key * update .gitreview for stable/kilo * gnocchi: fix typo in the aggregation endpoint * broadcast data to relevant 
queues only * Imported Translations from Transifex * fix combination alarm with operator == 'or' * Updated from global requirements * proposal to add ZhiQiang Fan to Ceilometer core * Open Liberty development * Fix a samples xfail test that now succeeds * Cosmetic changes for system architecture docs * Fix a issue for kafka-publisher and refactor the test code * pymongo 3.0 breaks ci gate * use oslo.messaging dispatch filter * Further mock adjustments to deal with intermittent failure * Adds support for default rule in ceilometer policy.json * Updated from global requirements * limit alarm actions * Use oslo_vmware instead of deprecated oslo.vmware * Remove 'samples:groupby' from the Capabilities list * Use old name of 'hardware.ipmi.node.temperature' * Revert "remove instance: meter" * Tweak authenticate event definition * Add project and domain ID to event definition for identity CRUD * Fix the event type for trusts * reset croniter to avoid cur time shift * Imported Translations from Transifex * Avoid a error when py27 and py-mysql tests run in sequence * Stop using PYTHONHASHSEED=0 in ceilometer tests * remove instance: meter * Added ipv6 support for udp publisher * Remove the unnecessary dependency to netaddr * Optimize the flow of getting pollster resources * support ability to skip message signing * Avoid conflict with existing gnocchi_url conf value * Using oslo.db retry decorator for sample create * alarm: Use new gnocchi aggregation API * collector: enable the service to listen on IPv6 * minimise the use of hmac * Typo in pylintrc * Ceilometer retrieve all images by 'all-tenants' * fix incorrect key check in swift notifications * support disabling profiler and http meters * ensure collections created on upgrade * Fix common misspellings * Updated from global requirements * refuse to post sample which is not supported * Enable collector to requeue samples when enabled * drop deprecated novaclient.v1_1 * exclude precise metaquery in query field * Imported Translations from Transifex * remove log message when process notification * Add gabbi tests for resources * Fix typos and format in docstrings in http dispatcher * add ability to dispatch events to http target * doc: fix class name * add ability to publish to multiple topics * make field and value attributes mandatory in API Query * Fix db2 upgrade in multi-thread run issue * Add memory.resident libvirt meter for Ceilometer * Update reference * Check the namespaces duplication for ceilometer-polling * Add gabbi tests to explore the Meter and MetersControllers * Imported Translations from Transifex * mysql doesn't understand intersect * order traits returned within events * add network, kv-store, and http events * Add support for additional identity events * Add a Kafka publisher as a Ceilometer publisher * Fix response POST /v2/meters/(meter_name) to 201 status * Attempt to set user_id for identity events * Switch to oslo.policy 0.3.0 * normalise timestamp in query * Add more power and thermal data * Updated from global requirements * Fix formatting error in licence * Added option to allow sample expiration more frequently * add option to store raw notification * use mongodb distinct * remove event_types ordering assumption * Add gabbi tests to cover the SamplesController * api: fix alarm creation if time_constraint is null * fix log message format in event.storage.impl_sqlalchemy * Remove duplications from docco * Tidy up clean-samples.yaml * Fix a few typos in the docs * use default trait type in event list query * fix wrong 
string format in libvirt inspector * create a developer section and refactor * Do not default pecan_debug to CONF.debug * Adding Gabbi Tests to Events API * fix config opts in objectstore.rgw * Updated from global requirements * support time to live on event database for sql backend * add an option to disable non-metric meters * add missing objectstore entry points * Initial gabbi testing for alarms * reorganise architecture page * Add ceph object storage meters * Use oslo_config choices support * fix inline multiple assignment * alarming: add gnocchi alarm rules * Protect agent startup from import errors in plugins * Revert "Add ceph object storage meters" * api: move alarm rules into they directory * compress events notes * Destroy fixture database after each gabbi TestSuite * Fix unittests for supporting py-pgsql env * Adding links API and CLI query examples * correct column types in events * Be explicit about using /tmp for temporary datafiles * Patch for fixing hardware.memory.used metric * Add ceph object storage meters * [PostgreSQL] Fix regexp operator * Add clean_exit for py-pgsql unit tests * modify events sql schema to reduce empty columns * Remove duplicated resource when pollster polling * check metering_connection attribute by default * unicode error in event converter * cleanup measurements page * api: add missing combination_rule field in sample * Fix test case of self-disabled pollster * update event architecture diagram * use configured max_retries and retry_interval for database connection * Updated from global requirements * Making utilization the default spelling * Add Disk Meters for ceilometer * correctly leave group when process is stopped * Updated from global requirements * enable oslo namespace check for ceilometer project * Add doc for version list API * Enabling self-disabled pollster * Use werkzeug to run the developement API server * Imported Translations from Transifex * switch to oslo_serialization * move non-essential libs to test-requirements * Validate default values in config * fix the value of query_spec.maxSample to advoid to be zero * clean up to use common service code * Add more sql test scenarios * [SQLalchemy] Add regex to complex queries * Fix duplication in sinks names * metering data ttl sql backend breaks resource metadata * Refactor unit test code for disk pollsters * start recording error notifications * Remove no_resource hack for IPMI pollster * Add local node resource for IPMI pollsters * Use stevedore to load alarm rules api * [MongoDB] Add regex to complex queries * Imported Translations from Transifex * support time to live on event database for MongoDB * split api.controllers.v2 * add elasticsearch events db * use debug value for pecan_debug default * Shuffle agents to send request * Updated from global requirements * Adds disk iops metrics implementation in Hyper-V Inspector * discovery: allow to discover all endpoints * Declarative HTTP testing for the Ceilometer API * add listener to pick up notification from ceilometermiddleware * Drop deprecated namespace for oslo.rootwrap * remove empty module tests.collector * Add disk latency metrics implementation in Hyper-V Inspector * add event listener to collector * add notifier publisher for events * enable event pipeline * Imported Translations from Transifex * deprecate swift middleware * sync oslo and bring in versionutils * Expose alarm severity in Alarm Model * Hyper-V: Adds memory metrics implementation * Remove mox from requirements * Fix IPMI unit test to cover different 
platforms * adjust import group order in db2 ut code * add event pipeline * remove unexistent module from doc/source/conf.py * Upgrade to hacking 0.10 * Remove the Nova notifier * Remove argparse from requirements * [MongoDB] Improves get_meter_statistics method * Fix docs repeating measuring units * [DB2 nosql] Create TIMESTAMP type index for 'timestamp' field * remove pytidylib and netifaces from tox.ini external dependency * Avoid unnecessary API dependency on tooz & ceilometerclient * Correct name of "ipmi" options group * Fix Opencontrail pollster according the API changes * enable tests.storage.test_impl_mongodb * Remove lockfile from requirements * Disable eventlet monkey-patching of DNS * Expose vm's metadata to metrics * Adding build folders & sorting gitignore * Disable proxy in unit test case of test_bin * Add Event and Trait API to document * Refactor ipmi agent manager * Use alarm's evaluation periods in sufficient test * Use oslo_config instead of deprecated oslo.config * Avoid executing ipmitool in IPMI unit test * Updated from global requirements * Add a direct to database publisher * Fixed MagnetoDB metrics title * Imported Translations from Transifex * Fix incorrect test case name in test_net.py * Updated from global requirements * notification agent missing CONF option * switch to oslo_i18n * Use right function to create extension list for agent test * Imported Translations from Transifex * Add an exchange for Zaqar in profiler notification plugin * Remove unused pecan configuration options * Updated from global requirements * Use oslo_utils instead of deprecated oslo.utils * Match the meter names for network services * stop using private timeutils attribute * Update measurement docs for network services * Catch exception when evaluate single alarm * Return a meaningful value or raise an excpetion for libvirt * Imported Translations from Transifex * make transformers optional in pipeline * Added metering for magnetodb * Add release notes URL for Juno * Fix release notes URL for Icehouse * remove unnecessary str method when log messages * Revert "Remove Sphinx from py33 requirements" * untie pipeline manager from samples * reset listeners on agent refresh * Remove inspect_instances method from virt * Optimize resource list query * Synchronize Python 3 requirements * Remove unnecessary import_opt|group * Add test data generator via oslo messaging * Check to skip to poll and publish when no resource * Add oslo.concurrency module to tox --env genconfig * add glance events * add cinder events * Manual update from global requirements * Add cmd.polling.CLI_OPTS to option list * Ignore ceilometer.conf * Switch to oslo.context library * Revert "Skip to poll and publish when no resources found" * Added missing measurements and corrected errors in doc * Remove Sphinx from py33 requirements * Clean up bin directory * Improve tools/make_test_data.sh correctness * ensure unique pipeline names * implement notification coordination * Make methods static where possible (except openstack.common) * Fix docs to suit merged compute/central agents concept * Drop anyjson * Move central agent code to the polling agent module * RBAC Support for Ceilometer API Implementation * [SQLalchemy] Add groupby ability resource_metadata * Improve links in config docs * Make LBaaS total_connections cumulative * remove useless looping in pipeline * Encompassing one source pollsters with common context * Modify tests to support ordering of wsme types * Make compute discovery pollster-based, not agent-level * 
Add docs about volume/snapshot measurements * Port to graduated library oslo.i18n * Retry to connect database when DB2 or mongodb is restarted * Updated from global requirements * Standardize timestamp fields of ceilometer API * Workflow documentation is now in infra-manual * Add alarm_name field to alarm notification * Updated from global requirements * Rely on VM UUID to fetch metrics in libvirt * Imported Translations from Transifex * Initializing a longer resource id in DB2 nosql backend * Sync oslo-incubator code to latest * ensure unique list of consumers created * fix import oslo.concurrency issue * Add some rally scenarios * Do not print snmpd password in logs * Miniscule typo in metering_connection help string * add http dispatcher * [MongoDB] Add groupby ability on resource_metadata * [MongoDB] Fix bug with 'bad' chars in metadatas keys * Override retry_interval in MongoAutoReconnectTest * Exclude tools/lintstack.head.py for pep8 check * Add encoding of rows and qualifiers in impl_hbase * Database.max_retries only override on sqlalchemy side * Support to capture network services notifications * Internal error with period overflow * Remove Python 2.6 classifier * Enable pep8 on ./tools directory * Imported Translations from Transifex * Fixes Hyper-V Inspector disk metrics cache issue * fix swift middleware parsing * Fix order of arguments in assertEqual * Updated from global requirements * Adapting pylint runner to the new message format * Validate AdvEnum & return an InvalidInput on error * add sahara and heat events * add keystone events to definitions * Add timeout to all http requests * [MongoDB] Refactor time to live feature * transform samples only when transformers exist * Updated from global requirements * Remove module not really used by Ceilometer * Switch to oslo.concurrency * Skip to poll and publish when no resources found * Change event type for identity trust notifications * Add mysql and postgresql in tox for debug env * Add new notifications types for volumes/snapshots * Add encoding to keys in compute_signature * Tests for system and network aggregate pollsters * Add bandwidth to measurements * Fix wrong example of capabilities * Correct the mongodb_replica_set option's description * Alarms listing based on "timestamp" * Use 'pg_ctl' utility to start and stop database * Correct alarm timestamp field in unittest code * Refactor kwapi unit test * Remove duplicated config doc * VMware: Enable VMware inspector to support any port * Clean event method difinition in meter storage base * Fix some nits or typos found by chance * Add Sample ReST API path in webapi document * Enable filter alarms by their type * Fix storage.hbase.util.prepare_key() for 32-bits system * Add event storage for test_hbase_table_utils * Add per device rate metrics for instances * Fix hacking rule H305 imports not grouped correctly * Add __repr__ method for sample.Sample * remove ordereddict requirement * Improve manual.rst file * Imported Translations from Transifex * Fix columns migrating for PostgreSQL * Updated from global requirements * Updated from global requirements * [MongoDB] Fix bug with reconnection to new master node * Updated from global requirements * support request-id * Update coverage job to references correct file * remove reference to model in migration * Use oslo_debug_helper and remove our own version * Allow collector service database connection retry * refresh ceilometer architecture documentation * Edits assert methods * Adds memory stats meter to libvirt inspector * 
Edits assert methods * Edits assert methods * Edits assert methods * Edits assert method * Imported Translations from Transifex * Imported Translations from Transifex * Updated from global requirements * add script to generate test event data * Handle poorly formed individual sensor readings * refactor hbase storage code * Avoid clobbering existing class definition * Hoist duplicated AlarmService initialization to super * Clarify deprecation comment to be accurate * Work toward Python 3.4 support and testing * Fix recording failure for system pollster * sync and clean up oslo * Add missing notification options to the documentation * Add missing alarm options to the documentation * Add oslo.db to config generator * Add missed control exchange options to the documentation * Add coordination related options to the documentation * Add missing collector options to the documentation * switch to oslo-config-generator * Edit docs for docs.opentack.org/developer/ * Add oslo.db to config generator * Fix signature validation failure when using qpid message queue * clean capabilities * move db2 and mongo driver to event tree * move sql event driver to event tree * move hbase event driver to event tree * Sets default encoding for PostgreSQL testing * update database dispatcher to use events db * Add role assignment notifications for identity * add mailmap to avoid dup of authors * Add user_metadata to network samples * Fix recording failure for system pollster * Manually updated translations * Updated from global requirements * Creates one database per sql test * Adds pylint check for critical error in new patches * Fix neutron client to catch 404 exceptions * Fix OrderedDict usage for Python 2.6 * Include a 'node' key and value in ipmi metadata * clean path in swift middleware * Implement redesigned separator in names of columns in HBase * [HBase] Add migration script for new row separate design * Imported Translations from Transifex * Include a 'node' key and value in ipmi metadata * Updated from global requirements * Run unit tests against PostgreSQL * create skeleton files for event storage backends * Imported Translations from Transifex * isolate event storage models * Fix neutron client to catch 404 exceptions * Run unit tests against MySQL * Updated from global requirements * Correct JSON-based query examples in documentation * Open Kilo development * Add cfg.CONF.import_group for service_credentials * Fix OrderedDict usage for Python 2.6 * clean path in swift middleware * Partition static resources defined in pipeline.yaml * Per-source separation of static resources & discovery * dbsync: Acknowledge 'metering_connection' option * Fix bug in the documentation * Use oslo.msg retry API in rpc publisher * Describe API versions * Change compute agent recurring logs from INFO to DEBUG * Fix bug with wrong bool opt value interpolation * [HBase] Improves speed of unit tests on real HBase backend * Imported Translations from Transifex * Removed unused abc meta class * update references to auth_token middleware * clean up swift middleware to avoid unicode errors * [HBase] Catch AlreadyExists error in Connection upgrade * Use None instead of mutables in method params default values * Updated from global requirements * Enable to get service types from configuration file * test db2 driver code * Docs: Add description of pipeline discovery section * Typo "possibilites" should be "possibilities" * Modified docs to update DevStack's config filename * Add an API configuration section to docs * Tune up mod_wsgi 
settings in example configuration * Allow pecan debug middleware to be turned off * Provide __repr__ for SampleFilter * Eliminate unnecessary search for test cases * Switch to a custom NotImplementedError * minimise ceilometer memory usage * Partition swift pollster resources by tenant * Add IPMI pollster * Add IPMI support * Stop using intersphinx * Use central agent manager's keystone token in discoveries * Handle invalid JSON filters from the input gracefully * Sync jsonutils for namedtuple_as_object fix * ceilometer spamming syslog * Timestamp bounds need not be tight (per ceilometer 1288372) * Allow to pass dict from resource discovery * fix network discovery meters * switch to sqlalchemy core * Imported Translations from Transifex * Improve the timestamp validation of ceilometer API * Update docs with Sahara notifications configuration * Migrate the rest of the central agent pollsters to use discoveries * Add documentation for implemented identity meters * Fix tests with testtools>=0.9.39 * Document the standard for PaaS service notifications * Returns 401 when unauthorized project access occurs * Adding another set of hardware metrics * normalise resource data * warn against sorting requirements * Add validate alarm_actions schema in alarm API * Fix help strings * Imported Translations from Transifex * Switch partitioned alarm evaluation to a hash-based approach * Central agent work-load partitioning * collector: Allows to requeue a sample * Typo fixed * Switch to oslo.serialization * Document pipeline publishers configuration * Alarm: Use stevedore to load the service class * Enhance compute diskio tests to handle multi instance * Adding comparison operators in query for event traits * XenAPI support: Update measurements documentation * update requirements * add documentation for setting up api pipeline * Permit usage of notifications for metering * XenAPI support: Disk rates * XenAPI support: Changes for networking metrics * XenAPI support: Memory Usage * XenAPI support: Changes for cpu_util * XenAPI support: List the instances * Rebase hardware pollsters to use new inspector interface * Switch to use oslo.db * Remove oslo middleware * Adding quotas on alarms * Add an exchange for Trove in profiler notification plugin * Simplify chained comparisons * In-code comments should start with `#`, not with `"""` * Remove redundant parentheses * skip polls if service is not registered * re-add hashseed to avoid gate error * Switch to oslo.utils * Switch to oslotest * Handle sqlalchemy connection strings with drivers * Rewrite list creation as a list literal * Rewrite dictionary creation as a dictionary literal * Triple double-quoted strings should be used for docstrings * Add upgrading alarm storage in dbsync * Improving of configuration.rst * Fix typos in transformer docstrings * Update tox.ini pep8 config to ignore i18n functions * Added new hardware inspector interface * compute: fix wrong test assertion * sync olso-incubator code * VMware: Support secret host_password option * refactor filter code in sql backend * Support for per disk volume measurements * Use a FakeRequest object to test middleware * Imported Translations from Transifex * Improve api_paste_config file searching * [Hbase] Add column for source filter in _get_meter_samples * Issue one SQL statement per execute() call * Allow tests to run outside tox * [HBase] Refactor hbase.utils * Set page size when Glance API request is called * Adding init into tools folder * Enhancing the make_test_data script * correct DB2 
installation supported features documentation * Avoid duplication of discovery for multi-sink sources * Improve performance of libvirt inspector requests * Documented Stevedore usage and source details * Add notifications for identity authenticate events * Add message translate module in vmware inspector * Handle Cinder attach and detach notifications * [HBase] Improve uniqueness for row in meter table * Doc enhancement for API service deployment with mod_wsgi * Update documentation for new transformer * Add the arithmetic transformer endpoint to setup.cfg * Imported Translations from Transifex * Fix unit for vpn connection metric * Debug env for tox * Change spelling mistakes * Use auth_token from keystonemiddleware * Fix dict and set order related issues in tests * Fix listener for update.start notifications * Sahara integration with Ceilometer * Add notifications for identity CRUD events * Extracting make_resource_metadata method * Fix make_test_data tools script * Add cumulative and gauge to aggregator transformer * Enable some tests against py33 * Remove --tmpdir from mktemp * Replace dict.iteritems() with six.iteritems(dict) * Replace iterator.next() with next(iterator) * Fix aggregator flush method * Automatic discovery of TripleO Overcloud hardware * Set python hash seed to 0 in tox.ini * Don't override the original notification message * Remove ConnectionProxy temporary class * Move sqlalchemy alarms driver code to alarm tree * basestring replaced with six.string_types * Correct misspelled words * Add retry function for alarm REST notifier * Move hbase alarms driver code to alarm tree * Update measurement docs for FWaaS * Update measurement docs for VPNaaS * Follow up fixes to network services pollsters * Updated from global requirements * Implement consuming ipmi notifications from Ironic * Support for metering FWaaS * Adds Content-Type to alarm REST notifier * Multi meter arithmetic transformer * Remove redudent space in doc string * Use None instead of mutables in test method params defaults * Add support for metering VPNaaS * Use resource discovery for Network Services * Change of get_events and get_traits method in MongoDB and Hbase * Fix two out-dated links in doc * Move log alarms driver code to alarm tree * Separate the console scripts * clean up event model * improve expirer performance for sql backend * Move mongodb/db2 alarms driver code to alarm tree * Allow to have different DB for alarm and metering * Replace datetime of time_constraints by aware object * Sync oslo log module and its dependencies * Use hmac.compare_digest to compare signature * Add testcase for multiple discovery-driven sources * Fixes aggregator transformer timestamp and user input handling * Improves pipeline transformer documentation * Fix incorrect use of timestamp in test * Add keystone control exchange * Fix call to meter-list in measurements doc * Remove redundant parentheses * [Mongodb] Implement events on Mongodb and DB2 * Fix typos in code comments & docstrings * Make the error message of alarm-not-found clear * Fix SQL exception getting statitics with metaquery * Remove docutils pin * update default_log_levels set by ceilometer * Fix annoying typo in partition coordinator test * Transform sample_cnt type to int * Remove useless sources.json * Fix H405 violations and re-enable gating * Fix H904 violations and re-enable gating * Fix H307 violations and re-enable gating * Fix the section name in CONTRIBUTING.rst * Added osprofiler notifications plugin * Improve a bit performance of Ceilometer 
* Revert "Align to openstack python package index mirror" * Fix aggregator _get_unique_key method * Remove meter hardware.network.bandwidth.bytes * Fix F402 violations and re-enable gating * Fix E265 violations and re-enable gating * Fix E251 violations and re-enable gating * Fix E128 violations and re-enable gating * Fix E126,H104 violations and re-enable gating * Bump hacking to 0.9.x * Fixed various import issues exposed by unittest * use urlparse from six * clean up sample index * Fix HBase available capabilities list * Updated from global requirements * VMware:Update the ceilometer doc with VMware opts * Handle non-ascii character in meter name * Add log output of "x-openstack-request-id" from nova * Imported Translations from Transifex * fix StringIO errors in unit test * Fix hacking rule 302 and enable it * Imported Translations from Transifex * sync oslo code * Fixes ceilometer-compute service start failure * Reenables the testr per test timeout * Avoid reading real config files in unit test * Clean up oslo.middleware.{audit,notifier} * Use hacking from test-requirements * Splits hbase storage code base * Splits mongo storage code base * Separate alarm storage models from other models * Iterates swift response earlier to get the correct status * Fix messaging.get_transport caching * Fix method mocked in a test * Don't keep a single global TRANSPORT object * Clean up .gitignore * Fix Sphinx directive name in session.py * Fix list of modules not included in auto-gen docs * Downgrade publisher logging to debug level again * remove default=None for config options * [HBase] get_resource optimization * Fix incorrect trait initialization * Remove unused logging in tests * Revert "Fix the floatingip pollster" * Remove low-value logging from publication codepath * Fix LBaaS connection meter docs * Fix the meter type for LB Bytes * Adding alarm list filtering by state and meter * Adds caches for image and flavor in compute agent * [HBase] Implement events on HBase * Skipping central agent pollster when keystone not available * Respect $TMPDIR environment variable to run tests * Fixed unit test TestRealNotification * Update Measurement Docs for LBaaS * Metering LoadBalancer as a Service * Removes per test testr timeout * Change pipeline_manager to instance attribute in hooks * Change using of limit argument in get_sample * Refactor tests to remove direct access to test DBManagers * Fix notification for NotImplemented record_events * Add missing explicit cfg option import * Fix ceilometer.alarm.notifier.trust import * Use TYPE_GAUGE rather than TYPE_CUMULATIVE * Update doc for sample config file issue * Corrects a flaw in the treatment of swift endpoints * use LOG instead of logger as name for the Logger object * Fix doc gate job false success * Improve performance of api requests with hbase scan * Add new 'storage': {'production_ready': True} capability * Clean tox.ini * Remove (c) and remove unnecessary encoding lines * Fix testing gate due to new keystoneclient release * Ignore the generated file ceilometer.conf.sample * Update the copyright date in doc * Updated from global requirements * reconnect to mongodb on connection failure * refactor sql backend to improve write speed * Don't rely on oslomsg configuration options * replaced unicode() with six.text_type() * Synced jsonutils from oslo-incubator * Fix the floatingip pollster * Fix project authorization check * Update testrepository configuration * Implemented metering for Cinder's snapshots * Use joins instead of subqueries for 
metadata filtering * Use None instead of mutables in method params defaults * Remove all mostly untranslated PO files * switch SplitResult to use six * Remove unused db code due to api v1 drop * Updated from global requirements * oslo.messaging context must be a dict * Drop deprecated api v1 * Fix network notifications of neutron bulk creation * mongo: remove _id in inserted alarm changes * Clean up openstack-common.conf * Revert "oslo.messaging context must be a dict" * Correct class when stopping partitioned alarm eval svc * oslo.messaging context must be a dict * Corrections of spelling, rephrasing for clarity * Adapt failing tests for latest wsme version * Removed StorageEngine class and it's hierarchy * Correcting formatting and adding period in measurement doc * Initialize dispatcher manager in event endpoint * Replaced CONF object with url in storage engine creation * Synced jsonutils from oslo-incubator * Remove gettextutils._ imports where they are not used * Remove "# noqa" leftovers for gettextutils._ * transformer: Add aggregator transformer * Remove conversion debug message * Fix the return of statistic with getting no sample * Remove eventlet.sleep(0) in collector tests * Don't allow queries with 'IN' predicate with an empty sequence * Check if samples returned by get_sample_data are not None * Opencontrail network statistics driver * Add a alarm notification using trusts * Replace hard coded WSGI application creation * Describe storage backends in the collector installation guide * Made get_capabilities a classmethod instead of object method * Disable reverse dns lookup * Consume notif. from multiple message bus * Use NotificationPlugin as an oslo.msg endpoint * Improve combination rule validation * Remove ceilometer.conf.sample * Use known protocol scheme in keystone tests * cleanup virt pollster code * Add encoding argument to deserialising udp packets in collector * Made get_engine method module-private * Make entities (Resource, User, Project) able to store lists * Remove duplicate alarm from alarm_ids * More accurate meter name and unit for host load averages * Replace oslo.rpc by oslo.messaging * Fix a response header bug in the error middleware * Remove unnecessary escape character in string format * Optimize checks to set image properties in metadata * fix statistics query in postgres * Removed useless code from __init__ method * Refactored fake connection URL classes * Replace assert statements with assert methods * Removes direct access of timeutils.override_time * Disable specifying alarm itself in combination rule * Include instance state in metadata * Allowed nested resource metadata in POST'd samples * Sync oslo-incubator code * Updated from global requirements * Refactor the DB implementation of Capabilities API * Fix Jenkins translation jobs * Align to openstack python package index mirror * User a more accurate max_delay for reconnects * Open Juno development * Imported Translations from Transifex * Add note on aggregate duplication to API docco * Use ConectionPool instead of one Connection in HBase * remove dump tables from previous migrations * De-dupe selectable aggregate list in statistics API * ensure dispatcher service is configured before rpc * improve performance of resource-list in sql * SSL errors thrown with Postgres on multi workers * Remove escape character in string format * Verify user/project ID for alarm created by non-admin user * enable a single worker by default * Fix ceilometer.conf.sample mismatch * Metadata in compute.instance.exists 
fix * Fix order of arguments in assertEquals * Documenting hypervisor support for nova meters * Ensure idempotency of cardinality reduction in mongo * VMware vSphere: Improve the accuracy of queried samples * Use swob instead of webob in swift unit tests * Disable oslo.messaging debug logs * Fix validation error for invalid field name in simple query * fix create_or_update logic to avoid rollbacks * Avoid swallowing AssertionError in test skipping logic * Fix hardware pollster to inspect multiple resources * spawn multiple workers in services * Install global lazy _() * Fixes Hyper-V metrics units * Ensure intended indices on project_id are created for mongo * Fix the type of the disk IO rate measurements * Change the sample_type from tuple to string * Fix order of arguments in assertEquals * Ensure alarm rule conform to alarm type * insecure flag added to novaclient * Fixes duplicated names in alarm time constraints * Use the list when get information from libvirt * Eventlet monkeypatch must be done before anything * 028 migration script incorrectly skips over section * Fix bug in get_capabilities behavior in DB drivers * Added documentation for selectable aggregates * Make sure use IPv6 sockets for ceilometer in IPv6 environment * VMware vSphere: Bug fixes * Ensure insecure config option propagated by alarm evaluator * Fix order of arguments in assertEquals * Fix order of arguments in assertEquals * Fix order of arguments in assertEquals * Rationalize get_resources for mongodb * Ensure insecure config option propagated by alarm service * add host meters to doc * Add field translation to complex query from OldSample to Sample * Extend test case to cover old alarm style conversion * Updated doc with debug instructions * Refactored the way how testscenarios tests are run * Corrected the sample names in hardware pollsters * Prevent alarm_id in query field of getting history * Make ceilometer work with sqla 0.9.x * Implements monitoring-network-from-opendaylight * Add user-supplied arguments in log_handler * VMware vSphere support: Disk rates * Fix updating alarm can specify existing alarm name * Changes for networking metrics support for vSphere * VMware vSphere: Changes for cpu_util * VMware vSphere support: Memory Usage * Fix broken statistics in sqlalchemy * Fixes Hyper-V Inspector network metrics values * Set storage engine for the trait_type table * Enable monkeypatch for select module * Rename id to alarm_id of Alarm in SqlAlchemy * Fix some spelling mistakes and a incorrect url * Skip central agent interval_task when keystone fails * Ensure user metadata mapped for instance notifications * Per pipeline pluggable resource discovery * Wider selection of aggregates for sqlalchemy * Wider selection of aggregates for mongodb * Adds time constraints to alarms * Remove code duplication Part 3 * Decouple source and sink configuration for pipelines * Selectable aggregate support in mongodb * Selectable aggregation functions for statistics * Add simple capabilities API * Removed global state modification by api test * VMware vSphere support: Performance Mgr APIs * Fix typo * move databases to test requirements * Make recording and scanning data more determined * Implements "not" operator for complex query * Implements metadata query for complex query feature * Alarms support in HBase Part 2 * Alarm support in HBase Part 1 * Remove unused variable * Added hardware pollsters for the central agent * Added hardware agent's inspector and snmp implementation * Updated from global requirements * 
Pluggable resource discovery for agents * Remove code duplication Part 2 * Imported Translations from Transifex * remove audit logging on flush * Tolerate absent recorded_at on older mongo/db2 samples * api: export recorded_at in returned samples * Fix the way how metadata is stored in HBase * Set default log level of iso8601 to WARN * Sync latest config file generator from oslo-incubator * Fix typo on testing doc page * Remove code duplication * sample table contains redundant/duplicate data * rename meter table to sample * storage: store recording timestamp * Fixed spelling error in Ceilometer * Adds doc string to query validate functions in V2 API * Updated from global requirements * Remove code that works around a (now-resolved) bug in pecan * Fix missing source field content on /v2/samples API * Refactor timestamp existence validation in V2 API * Use the module units to refer bytes type * sync units.py from oslo to ceilometer * Add comments for _build_paginate_query * Implements monitoring-network * Handle Heat notifications for stack CRUD * Alembic migrations not tested * Modify the discription of combination alarm * check domain state before inspecting nics/disks * Adds gettextutils module in converter * Keep py3.X compatibility for urllib.urlencode * Added missing import * Removed useless prints that pollute tests log * Implements in operator for complex query functionality * Implements field validation for complex query functionality * allow hacking to set dependencies * Implements complex query functionality for alarm history * Implements complex query functionality for alarms * Remove None for dict.get() * Replace assertEqual(None, *) with assertIsNone in tests * Update notification_driver * Switch over to oslosphinx * Fix some flaws in ceilometer docstrings * Rename Openstack to OpenStack * Remove start index 0 in range() * Updated from global requirements * Remove blank line in docstring * Use six.moves.urllib.parse instead of urlparse * Propogate cacert and insecure flags to glanceclient * Test case for creating an alarm without auth headers * Refactored run-tests script * Implements complex query functionality for samples * fix column name and alignment * Remove tox locale overrides * Updated from global requirements * Adds flavor_id in the nova_notifier * Improve help strings * service: re-enable eventlet just for sockets * Fixes invalid key in Neutron notifications * Replace BoundedInt with WSME's IntegerType * Replace 'Ceilometer' by 'Telemetry' in the generated doc * Doc: Add OldSample to v2.rst * Fixing some simple documentation typos * Updated from global requirements * Fix for a simple typo * Replace 'a alarm' by 'an alarm' * Move ceilometer-send-counter to a console script * sync oslo common code * Handle engine creation inside of Connection object * Adds additional details to alarm notifications * Fix formating of compute-nova measurements table * Fix string-to-boolean casting in queries * nova notifier: disable tests + update sample conf * Update oslo * Refactored session access * Fix the py27 failure because of "ephemeral_key_uuid" error * Correct a misuse of RestController in the Event API * Fix docs on what an instance meter represents * Fix measurement docs to correctly represent Existance meters * samples: fix test case status code check * Replace non-ascii symbols in docs * Use swift master * Add table prefix for unit tests with hbase * Add documentation for pipeline configuration * Remove unnecessary code from alarm test * Updated from global requirements * 
Use stevedore's make_test_instance * use common code for migrations * Use explicit http error code for api v2 * Clean .gitignore * Remove unused db engine variable in api * Revert "Ensure we are not exhausting the sqlalchemy pool" * eventlet: stop monkey patching * Update dev docs to include notification-agent * Change meter_id to meter_name in generated docs * Correct spelling of logger for dispatcher.file * Fix some typos in architecture doc * Drop foreign key contraints of alarm in sqlalchemy * Re-enable lazy translation * Sync gettextutils from Oslo * Fix wrong doc string for meter type * Fix recursive_keypairs output * Added abc.ABCMeta metaclass for abstract classes * Removes use of timeutils.set_time_override * tests: kill all started processes on exit * Exclude weak datapoints from alarm threshold evaluation * Move enable_acl and debug config to ceilometer.conf * Fix the Alarm documentation of Web API V2 * StringIO compatibility for python3 * Set the SQL Float precision * Convert alarm timestamp to PrecisionTimestamp * use six.move.xrange replace xrange * Exit expirer earlier if db-ttl is disabled * Added resources support in pollster's interface * Improve consistency of help strings * assertTrue(isinstance) replace by assertIsInstance * Return trait type from Event api * Add new rate-based disk and network pipelines * Name and unit mapping for rate_of_change transformer * Update oslo * Remove dependencies on pep8, pyflakes and flake8 * Implement the /v2/samples/ API * Fix to handle null threshold_rule values * Use DEFAULT section for dispatcher in doc * Insertion in HBase should be fixed * Trivial typo * Update ceilometer.conf.sample * Fix use the fact that empty sequences are false * Remove unused imports * Replace mongo aggregation with plain ol' map-reduce * Remove redundant meter (name,type,unit) tuples from Resource model * Fix work of udp publisher * tests: pass /dev/null as config for mongod * requirements: drop netaddr * tests: allow to skip if no database URL * Fix to tackle instances without an image assigned * Check for pep8 E226 and E24 * Fixed spelling mistake * AlarmChange definition added to doc/source/webapi/v2.rst * 1st & last sample timestamps in Resource representation * Avoid false negatives on message signature comparison * cacert is not picked up correctly by alarm services * Change endpoint_type parameter * Utilizes assertIsNone and assertIsNotNone * Add missing gettextutils import to ceilometer.storage.base * Remove redundant code in nova_client.Client * Allow customized reseller_prefix in Ceilometer middleware for Swift * Fix broken i18n support * Empty files should no longer contain copyright * Add Event API * Ensure we are not exhausting the sqlalchemy pool * Add new meters for swift * Sync config generator workaround from oslo * storage: factorize not implemented methods * Don't assume alarms are returned in insert order * Correct env variable in file oslo.config.generator.rc * Handle the metrics sent by nova notifier * Add a wadl target to the documentation * Sync config generator from oslo-incubator * Convert event timestamp to PrecisionTimestamp * Add metadata query validation limitation * Ensure the correct error message is displayed * Imported Translations from Transifex * Move sphinxcontrib-httpdomain to test-requirements * Ensure that the user/project exist on alarm update * api: raise ClientSideError rather than ValueError * Implement the /v2/sample API * service: fix service alive checking * Oslo sync to recover from db2 server disconnects * 
Event Storage Layer * config: specify a template for mktemp * test code should be excluded from test coverage summary * doc: remove note about Nova plugin framework * doc: fix formatting of alarm action types * Updated from global requirements * Add configuration-driven conversion to Events * add newly added constraints to expire clear_expired_metering_data * fix unit * Add import for publisher_rpc option * add more test cases to improve the test code coverage #5 * Create a shared queue for QPID topic consumers * Properly reconnect subscribing clients when QPID broker restarts * Don't need session.flush in context managed by session * sql migration error in 020_add_metadata_tables * Remove rpc service from agent manager * Imported Translations from Transifex * organise requirements files * Add a Trait Type model and db table * No module named MySQLdb bug * Add a note about permissions to ceilometer logging directory * sync with oslo-incubator * Rename OpenStack Metering to OpenStack Telemetry * update docs to adjust for naming change * Add i18n warpping for all LOG messages * Imported Translations from Transifex * Removed unused method in compute agent manger * connection is not close in migration script * Fixed a bug in sql migration script 020 * Fixed nova notifier test * Added resources definition in the pipeline * Change metadata_int's value field to type bigint * Avoid intermittent integrity error on alarm creation * Simplify the dispatcher method prototype * Use map_method from stevedore 0.12 * Remove the collector submodule * Move dispatcher a level up * Split collector * Add a specialized Event Type model and db table * Remove old sqlalchemy-migrate workaround * Revert "Support building wheels (PEP-427)" * full pep8 compliance (part 2) * Selectively import RPC backend retry config * Fixes Hyper-V Inspector disk metrics bug * Imported Translations from Transifex * full pep8 compliance (part1) * Replace mox with mock in alarm,central,image tests * Stop ignoring H506 errors * Update hacking for real * Replace mox with mock in tests.collector * Replace mox with mock in publisher and pipeline * Replace mox with mock in novaclient and compute * Remove useless defined Exception in tests * Support building wheels (PEP-427) * Fixes Hyper-V Inspector cpu metrics bug * Replace mox with mock in tests.storage * Document user-defined metadata for swift samples * Replace mox with mock in energy and objectstore * Updated from global requirements * Replace mox with mock in tests.api.v2 * Refactor API error handling * make record_metering_data concurrency safe * Move tests into ceilometer module * Replace mox with mock in tests.api.v1 * Replace mox with mock in tests.api.v2.test_compute * Corrected import order * Use better predicates from testtools instead of plain assert * Stop using openstack.common.exception * Replace mox with mock in tests.network * Replace mox with mocks in test_inspector * Fix failing nova_tests tests * Replace mox with mocks in tests.compute.pollsters * Add an insecure option for Keystone client * Sync log from oslo * Cleanup tests.publisher tests * mongodb, db2: do not print full URL in logs * Use wsme ClientSideError to handle unicode string * Use consistant cache key for swift pollster * Fix the developer documentation of the alarm API * Fix the default rpc policy value * Allow Events without traits to be returned * Replace tests.base part8 * Replace tests.base part7 * Replace tests.base part6 * Imported Translations from Transifex * Imported Translations from Transifex * 
Sync log_handler from Oslo * Don't use sqlachemy Metadata as global var * enable sql metadata query * Replace tests.base part5 * Replace tests.base part4 * Imported Translations from Transifex * Updated from global requirements * Fix doc typo in volume meter description * Updated from global requirements * Add source to Resource API object * compute: virt: Fix Instance creation * Fix for get_resources with postgresql * Updated from global requirements * Add tests when admin set alarm owner to its own * Replace tests.base part3 * Replace tests.base part2 * Replace tests.base part1 * Fix wrong using of Metadata in 15,16 migrations * api: update for WSME 0.5b6 compliance * Changes FakeMemcache to set token to expire on utcnow + 5 mins * Change test case get_alarm_history_on_create * Change alarm_history.detail to text type * Add support for keystoneclient 0.4.0 * Ceilometer has no such project-list subcommand * Avoid leaking admin-ness into combination alarms * Updated from global requirements * Avoid leaking admin-ness into threshold-oriented alarms * Update Oslo * Set python-six minimum version * Ensure combination alarms can be evaluated * Ensure combination alarm evaluator can be loaded * Apply six for metaclass * add more test cases to improve the test code coverage #6 * Update python-ceilometerclient lower bound to 1.0.6 * Imported Translations from Transifex * add more test cases to improve the test code coverage #4 * db2 does not allow None as a key for user_id in user collection * Start Icehouse development * Imported Translations from Transifex * Disable lazy translation * Add notifications for alarm changes * Updated from global requirements * api: allow alarm creation for others project by admins * assertEquals is deprecated, use assertEqual * Imported Translations from Transifex * update alarm service setup in dev doc * Add bug number of some wsme issue * api: remove useless comments * issue an error log when cannot import libvirt * add coverage config file to control module coverage report * tests: fix rounding issue in timestamp comparison * api: return 404 if a alarm is not found * remove locals() for stringformat * add more test cases to improve the test code coverage #3 * Remove extraneous vim configuration comments * Return 401 when action is not authorized * api: return 404 if a resource is not found * keystone client changes in AuthProtocol made our test cases failing * Don't load into alarms evaluators disabled alarms * Remove MANIFEST.in * Allow to get a disabled alarm * Add example with return values in API v2 docs * Avoid imposing alembic 6.0 requirement on all distros * tests: fix places check for timestamp equality * Don't publish samples if resource_id in missing * Require oslo.config 1.2.0 final * Don't send unuseful rpc alarm notification * service: check that timestamps are almost equals * Test the response body when deleting a alarm * Change resource.resource_metadata to text type * Adding region name to service credentials * Fail tests early if mongod is not found * add more test cases to improve the test code coverage #2 * add more test cases to improve the test code coverage #1 * Imported Translations from Transifex * Replace OpenStack LLC with OpenStack Foundation * Use built-in print() instead of print statement * Simple alarm partitioning protocol based on AMQP fanout RPC * Handle manually mandatory field * Provide new API endpoint for alarm state * Implement the combination evaluator * Add alarm combination API * Notify with string representation of 
alarm reason * Convert BoundedInt value from json into int * Fix for timestamp precision in SQLAlchemy * Add source field to Meter model * Refactor threshold evaluator * Alarm API update * Update requirements * WSME 0.5b5 breaking unit tests * Fix failed downgrade in migrations * refactor db2 get_meter_statistics method to support mongodb and db2 * tests: import pipeline config * Fix a tiny mistake in api doc * collector-udp: use dispatcher rather than storage * Imported Translations from Transifex * Drop sitepackages=False from tox.ini * Update sphinxcontrib-pecanwsme to 0.3 * Architecture enhancements * Force MySQL to use InnoDB/utf8 * Update alembic requirement to 0.6.0 version * Correctly output the sample content in the file publisher * Pecan assuming meter names are extensions * Handle inst not found exceptions in pollsters * Catch exceptions from nova client in poll_and_publish * doc: fix storage backend features status * Add timestamp filtering cases in storage tests * Imported Translations from Transifex * Use global openstack requirements * Add group by statistics examples in API v2 docs * Add docstrings to some methods * add tests for _query_to_kwargs func * validate counter_type when posting samples * Include auth_token middleware in sample config * Update config generator * run-tests: fix MongoDB start wait * Imported Translations from Transifex * Fix handling of bad paths in Swift middleware * Drop the *.create.start notification for Neutron * Make the Swift-related doc more explicit * Fix to return latest resource metadata * Update the high level architecture * Alarm history storage implementation for sqlalchemy * Improve libvirt vnic parsing with missing mac! * Handle missing libvirt vnic targets! * Make type guessing for query args more robust * add MAINTAINERS file * nova_notifier: fix tests * Update openstack.common.policy from oslo-incubator * Clean-ups related to alarm history patches * Improved MongoClient pooling to avoid out of connections error * Disable the pymongo pooling feature for tests * Fix wrong migrations * Fixed nova notifier unit test * Add group by statistics in API v2 * Update to tox 1.6 and setup.py develop * Add query support to alarm history API * Reject duplicate events * Fixes a bug in Kwapi pollster * alarm api: rename counter_name to meter_name * Fixes service startup issue on Windows * Handle volume.resize.* notifications * Network: process metering reports from Neutron * Alarm history storage implementation for mongodb * Fix migration with fkeys * Fixes two typos in this measurements.rst * Add a fake UUID to Meter on API level * Append /usr/sbin:/sbin to the path for searching mongodb * Plug alarm history logic into the API * Added upper version boundry for six * db2 distinct call results are different from mongodb call * Sync rpc from oslo-incubator * Imported Translations from Transifex * Add pagination parameter to the database backends of storage * Base Alarm history persistence model * Fix empty metadata issue of instance * alarm: generate alarm_id in API * Import middleware from Oslo * Imported Translations from Transifex * Adds group by statistics for MongoDB driver * Fix wrong UniqueConstraint name * Adds else and TODO in statistics storage tests * Imported Translations from Transifex * Extra indexes cleanup * API FunctionalTest class lacks doc strings * install manual last few sections format needs to be fixed * api: update v1 for Flask >= 0.10 * Use system locale when Accept-Language header is not provided * Adds Hyper-V compute 
inspector * missing resource in middleware notification * Support for wildcard in pipeline * Refactored storage tests to use testscenarios * doc: replace GitHub by git.openstack.org * api: allow usage of resource_metadata in query * Remove useless doc/requirements * Fixes non-string metadata query issue * rpc: reduce sleep time * Move sqlachemy tests only in test_impl_sqlachemy * Raise Error when pagination/groupby is missing * Raise Error when pagination support is missing * Use timeutils.utcnow in alarm threshold evaluation * db2 support * plugin: remove is_enabled * Doc: improve doc about Nova measurements * Storing events via dispatchers * Imported Translations from Transifex * ceilometer-agent-compute did not catch exception for disk error * Change counter to sample in network tests * Change counter to sample in objectstore tests * Remove no more used code in test_notifier * Change counter to sample vocable in cm.transformer * Change counter to sample vocable in cm.publisher * Change counter to sample vocable in cm.image * Change counter to sample vocable in cm.compute * Change counter to sample vocable in cm.energy * Use samples vocable in cm.publisher.test * Change counter to sample vocable in volume tests * Change counter to sample vocable in api tests * Add the source=None to from_notification * Make RPCPublisher flush method threadsafe * Enhance delayed message translation when _ is imported * Remove use_greenlets argument to MongoClient * Enable concurrency on nova notifier tests * Imported Translations from Transifex * Close database connection for alembic env * Fix typo in 17738166b91 migration * Don't call publisher without sample * message_id is not allowed to be submitted via api * Api V2 post sample refactoring * Add SQLAlchemy implementation of groupby * Fixes failed notification when deleting instance * Reinitialize pipeline manager for service restart * Sync gettextutils from oslo-incubator * Doc: clearly state that one can filter on metadata * Add HTTP request/reply samples * Use new olso fixture in CM tests * Imported Translations from Transifex * Bump hacking to 0.7.0 * Fix the dict type metadata missing issue * Raise error when period with negative value * Imported Translations from Transifex * Import missing gettext _ * Remove 'counter' occurences in pipeline * Remove the mongo auth warning during tests * Change the error message of resource listing in mongodb * Change test_post_alarm case in test_alarm_scenarios * Skeletal alarm history API * Reorg alarms controller to facilitate history API * Fix Jenkins failed due to missing _ * Fix nova test_notifier wrt new notifier API * Remove counter occurences from documentation * Updated from global requirements * Fixes dict metadata query issue of HBase * s/alarm/alarm_id/ in alarm notification * Remove unused abstract class definitions * Removed unused self.counters in storage test class * Initial alarming documentation * Include previous state in alarm notification * Consume notification from the default queue * Change meter.resource_metadata column type * Remove MongoDB TTL support for MongoDB < 2.2 * Add first and last sample timestamp * Use MongoDB aggregate to get resources list * Fix resources/meters pagination test * Handle more Nova and Neutron events * Add support for API message localization * Add the alarm id to the rest notifier body * fix alarm notifier tests * Sync gettextutils from oslo * Fix generating coverage on MacOSX * Use the new nova Instance class * Return message_id in POSTed samples * rpc: 
remove source argument from message conversion * Remove source as a publisher argument * Add repeat_actions to alarm * Rename get_counters to get_samples * Add pagination support for MongoDB * Doc: measurements: add doc on Cinder/Swift config * Update nova_client.py * objectstore: trivial cleanup in _Base * Add support for CA authentication in Keystone * add unit attribute to statistics * Fix notify method signature on LogAlarmNotifier * Fix transformer's LOG TypeError * Update openstack.common * Fixes Hbase metadata query return wrong result * Fix Hacking 0.6 warnings * Make middleware.py Python 2.6 compatible * Call alembic migrations after sqlalchemy-migrate * Rename ceilometer.counter to ceilometer.sample * Added separate MongoDB database for each test * Relax OpenStack upper capping of client versions * Refactored MongoDB connection pool to use weakrefs * Centralized backends tests scenarios in one place * Added tests to verify that local time is correctly handled * Refactored impl_mongodb to use full connection url * calling distinct on _id field against a collection is slow * Use configured endpoint_type everywhere * Allow use of local conductor * Update nova configuration doc to use notify_on_state_change * doc: how to inject user-defined data * Add documentation on nova user defined metadata * Refactored API V2 tests to use testscenarios * Refactored API V1 tests to use testscenarios * alarm: Per user setting to disable ssl verify * alarm: Global setting to disable ssl verification * Imported Translations from Transifex * Implementation of the alarm RPCAlarmNotifier * Always init cfg.CONF before running a test * Sets storage_conn in CollectorService * Remove replace/preserve logic from rate of change transformer * storage: remove per-driver options * hbase: do not register table_prefix as a global option * mongodb: do not set replica_set as a global option * Change nose to testr in the documentation * Fixed timestamp creation in MongoDB mapreduce * Ensure url is a string for requests.post * Implement a https:// in REST alarm notification * Implement dot in matching_metadata key for mongodb * trailing slash in url causes 404 error * Fix missing foreign keys * Add cleanup migration for indexes * Sync models with migrations * Avoid dropping cpu_util for multiple instances * doc: /statistics fields are not queryable (you cannot filter on them) * fix resource_metadata failure missing image data * Standardize on X-Project-Id over X-Tenant-Id * Default to ctx user/project ID in sample POST API * Multiple dispatcher enablement * storage: fix clear/upgrade order * Lose weight for Ceilometer log in verbose mode * publisher.rpc: queing policies * Remove useless mongodb connection pool comment * Add index for db.meter by descending timestamp * doc: add a bunch of functional examples for the API * api: build the storage connection once and for all * Fix the argument of UnknownArgument exception * make publisher procedure call configurable * Disable mongod prealloc, wait for it to start * Added alembic migrations * Allow to enable time to live on metering sample * Implement a basic REST alarm notification * Imported Translations from Transifex * Ensure correct return code of run-tests.sh * File based publisher * Unset OS_xx variable before generate configuration * Use run-tests.sh for tox coverage tests * Emit cpu_util from transformer instead of pollster * Allow simpler scale exprs in transformer.conversions * Use a real MongoDB instance to run unit tests * Allow to specify the endpoint type 
to use * Rename README.md to README.rst * Use correct hostname to get instances * Provide CPU number as additional metadata * Remove get_counter_names from the pollster plugins * Sync SQLAlchemy models with migrations * Transformer to measure rate of change * Make sure plugins are named after their meters * Break up the swift pollsters * Split up the glance pollsters * Make visual coding style consistent * Separate power and energy pollsters * Break up compute pollsters * Implement a basic alarm notification service * Optionally store Events in Collector * Fix issue with pip installing oslo.config-1.2.0 * Transformer to convert between units * publisher.rpc: make per counter topic optional * ceilometer tests need to be enabled/cleaned * Also accept timeout parameter in FakeMemCache * Fix MongoDB backward compat wrt units * Use oslo.sphinx and remove local copy of doc theme * Reference setuptools and not distribute * enable v2 api hbase tests * Register all interesting events * Unify Counter generation from notifications * doc: enhance v2 examples * Update glossary * Imported Translations from Transifex * Imported Translations from Transifex * Filter query op:gt does not work as expected * sqlalchemy: fix performance issue on get_meters() * enable v2 api sqlalchemy tests * Update compute vnic pollster to use cache * Update compute CPU pollster to use cache * Update compute disk I/O pollster to use cache * update Quantum references to Neutron * Update swift pollster to use cache * Update kwapi pollster to use cache * Update floating-ip pollster to use cache * Update glance pollster to use cache * Add pollster data cache * Fix flake8 errors * Update Oslo * Enable Ceilometer to support mongodb replication set * Fix return error when resource can't be found * Simple service for singleton threshold eval * Basic alarm threshold evaluation logic * add metadata to nova_client results * Bring in oslo-common rpc ack() changes * Pin the keystone client version * Fix auth logic for PUT /v2/alarms * Imported Translations from Transifex * Change period type in alarms API to int * mongodb: fix limit value not being an integer * Check that the config file sample is always up to date * api: enable v2 tests on SQLAlchemy & HBase * Remove useless periodic_interval option * doc: be more explicit about network counters * Capture instance metadata in reserved namespace * Imported Translations from Transifex * pep8: enable E125 checks * pep8: enable F403 checks * pep8: enable H302 checks * pep8: enable H304 checks * pep8: enable H401 * pep8: enable H402 checks * Rename the MeterPublisher to RPCPublisher * Replace publisher name by URL * Enable pep8 H403 checks * Activate H404 checks * Ceilometer may generate wrong format swift url in some situations * Code cleanup * Update Oslo * Use Flake8 gating for bin/ceilometer-* * Update requirements to fix devstack installation * Update to the latest stevedore * Start gating on H703 * Remove disabled_notification_listeners option * Remove disabled_compute_pollsters option * Remove disabled_central_pollsters option * Longer string columns for Trait and UniqueNames * Fix nova notifier tests * pipeline: switch publisher loading model to driver * Enforce reverse time-order for sample return * Remove explicit distribute depend * Use Python 3.x compatible octal literals * Improve Python 3.x compatibility * Fix requirements * Corrected path for test requirements in docs * Fix some typo in documentation * Add instance_scheduled in entry points * fix session connection * Remove 
useless imports, reenable F401 checks * service: run common initialization stuff * Use console scripts for ceilometer-api * Use console scripts for ceilometer-dbsync * Use console scripts for ceilometer-agent-compute * Use console scripts for ceilometer-agent-central * agent-central: use CONF.import_opt rather than import * Move os_* options into a group * Use console scripts for ceilometer-collector * sqlalchemy: migration error when running db-sync * session flushing error * api: add limit parameters to meters * python3: Introduce py33 to tox.ini * Start to use Hacking * Session does not use ceilometer.conf's database_connection * Add support for limiting the number of samples returned * Imported Translations from Transifex * Add support policy to installation instructions * sql: fix 003 downgrade * service: remove useless PeriodicService class * Fix nova notifier tests * Explicitly set downloadcache in tox.ini * Imported Translations from Transifex * Switch to sphinxcontrib-pecanwsme for API docs * Update oslo, use new configuration generator * doc: fix hyphens instead of underscores for 'os*' conf options * Allow specifying a listen IP * Log configuration values on API startup * Don't use pecan to configure logging * Mark sensitive config options as secret * Imported Translations from Transifex * ImagePollster record duplicate counter during one poll * Rename requires files to standard names * Add an UDP publisher and receiver * hbase metaquery support * Imported Translations from Transifex * Fix and update extract_opts group extraction * Fix the sample name of 'resource_metadata' * Added missing source variable in storage drivers * Add Event methods to db api * vnics: don't presume existence of filterref/filter * force the test path to a str (sometimes is unicode) * Make sure that v2 api tests have the policy file configured * Imported Translations from Transifex * setup.cfg misses swift filter * Add a counter for instance scheduling * Move recursive_keypairs into utils * Replace nose with testr * Use fixtures in the tests * fix compute units in measurement doc * Allow suppression of v1 API * Restore default interval * Change from unittest to testtools * remove unused tests/skip module * Imported Translations from Transifex * Get all tests to use tests.base.TestCase * Allow just a bit longer to wait for the server to startup * Document keystone_authtoken section * Restore test dependency on Ming * Set the default pipline config file for tests * Imported Translations from Transifex * Fix cross-document references * Fix config setting references in API tests * Restrict pep8 & co to pep8 target * Fix meter_publisher in setup.cfg * Use flake8 instead of pep8 * Imported Translations from Transifex * Use sqlalchemy session code from oslo * Switch to pbr * fix the broken ceilometer.conf.sample link * Add a direct Ceilometer notifier * Do the same auth checks in the v2 API as in the v1 API * Add the sqlalchemy implementation of the alarms collection * Allow posting samples via the rest API (v2) * Updated the ceilometer.conf.sample * Don't use trivial alarm_id's like "1" in the test cases * Fix the nova notifier tests after a nova rename * Document HBase configuration * alarm: fix MongoDB alarm id * Use jsonutils instead of json in test/api.py * Connect the Alarm API to the db * Add the mongo implementation of alarms collection * Move meter signature computing into meter_publish * Update WSME dependency * Imported Translations from Transifex * Add Alarm DB API and models * Imported 
Translations from Transifex * Remove "extras" again * add links to return values from API methods * Modify limitation on request version * Doc improvements * Rename EventFilter to SampleFilter * Fixes AttributeError of FloatingIPPollster * Add just the most minimal alarm API * Update oslo before bringing in exceptions * Enumerate the meter type in the API Meter class * Remove "extras" as it is not used * Adds examples of CLI and API queries to the V2 documentation * Measurements documentation update * update the ceilometer.conf.sample * Set hbase table_prefix default to None * glance/cinder/quantum counter units are not accurate/consistent * Add some recommendations about database * Pin SQLAlchemy to 0.7.x * Ceilometer configuration.rst file not using right param names for logging * Fix require_map_reduce mim import * Extend swift middleware to collect number of requests * instances: fix counter unit * Remove Folsom support * transformer, publisher: move down base plugin classes * pipeline, publisher, transformer: reorganize code * Fix tests after nova changes * Update to the lastest loopingcall from oslo * Imported Translations from Transifex * update devstack instructions for cinder * Update openstack.common * Reformat openstack-common.conf * storage: move nose out of global imports * storage: get rid of get_event_interval * Remove gettext.install from ceilometer/__init__.py * Prepare for future i18n use of _() in nova notifier * Update part of openstack.common * Convert storage drivers to return models * Adpated to nova's gettext changes * add v2 query examples * storage: remove get_volume_sum and get_volume_max * api: run tests against HBase too * api: run sum unit tests against SQL backend too * Split and fix live db tests * Remove impl_test * api: run max_resource_volume test on SQL backend * Refactor DB tests * fix volume tests to utilize VOLUME_DELETE notification * Open havana development, bump to 2013.2 * Change the column counter_volume to Float * tests: disable Ming test if Ming unavailable * Imported Translations from Transifex * enable arguments in tox * api: run max_volume tests on SQL backend too * api: run list_sources tests on SQL and Mongo backend * api: run list_resources test against SQL * api: handle case where metadata is None * Fix statistics period computing with start/end time * Allow publishing arbitrary headers via the "storage.objects.*.bytes" counter * Updated the description of get_counters routine * enable xml error message response * Swift pollster silently return no counter if keystone endpoint is not present * Try to get rid of the "events" & "raw events" naming in the code * Switch to python-keystoneclient 0.2.3 * include a copy of the ASL 2.0 * add keystone configuration instructions to manual install docs * Update openstack.common * remove unused dependencies * Set the default_log_levels to include keystoneclient * Switch to final 1.1.0 oslo.config release * Add deprecation warnings for V1 API * Raise stevedore requirement to 0.7 * Fixed the blocking unittest issues * Fix a pep/hacking error in a swift import * Add sample configuration files for mod_wsgi * Add a tox target for building documentation * Use a non-standard port for the test server * Ensure the statistics are sorted * Start both v1 and v2 api from one daemon * Handle missing units values in mongodb data * Imported Translations from Transifex * Make HACKING compliant * Update manual installation instructions * Fix oslo.config and unittest * Return something sane from the log impl * Fix an 
invalid test in the storage test suite * Add the etc directory to the sdist manifest * api: run compute duration by resource on SQL backend * api: run list_projects tests against SQL backend too * api: run list users test against SQL backend too * api: run list meters tests against SQL backend too * Kwapi pollster silently return no probre if keystone endpoint is not present * HBase storage driver, initial version * Exclude tests directory from installation * Ensure missing period is treated consistently * Exclude tests when installing ceilometer * Run some APIv1 tests on different backends * Remove old configuration metering_storage_engine * Set where=tests * Decouple the nova notifier from ceilometer code * send-counter: fix & test * Remove nose wrapper script * Fix count type in MongoDB * Make sure that the period is returned as an int as the api expects an int * Imported Translations from Transifex * Remove compat cfg wrapper * compute: fix unknown flavor handling * Allow empty dict as metaquery param for sqlalchemy * Add glossary definitions for additional terms * Support different publisher interval * Fix message envelope keys * Revert recent rpc wire format changes * Document the rules for units * Fix a bug in compute manager test case * plugin: don't use @staticmethod with abc * Support list/tuple as meter message value * Imported Translations from Transifex * Update common to get new kombu serialization code * Disable notifier tests * pipeline: manager publish multiple counters * Imported Translations from Transifex * Use oslo-config-2013.1b3 * mongodb: make count an integer explicitely * tests: allow to run API tests on live db * Update to latest oslo-version * Imported Translations from Transifex * Add directive to MANIFEST.in to include all the html files * Use join_consumer_pool() for notifications * Update openstack.common * Add period support in storage drivers and API * Update openstack/common tree * storage: fix mongo live tests * swift: configure RPC service correctly * Fix tox python version for Folsom * api: use delta_seconds() * transformer: add acculumator transformer * Import service when cfg.CONF.os_* is used * pipeline: flush after publishing call * plugin: format docstring as rst * Use Mongo finalize to compute avg and duration * Code cleanup, remove useless import * api: fix a test * compute: fix notifications test * Move counter_source definition * Allow to publish several counters in a row * Fixed resource api in v2-api * Update meter publish with pipeline framework * Use the same Keystone client instance for pollster * pipeline: fix format error in logging * More robust mocking of nova conductor * Mock more conductor API methods to unblock tests * Update pollsters to return counter list * Update V2 API documentation * Added hacking.py support to pep8 portion of tox * setup: fix typo in package data * Fix formatting issue with v1 API parameters * Multiple publisher pipeline framework * Remove setuptools_git from setup_requires * Removed unused param for get_counters() * Use WSME 0.5b1 * Factorize agent code * Fixed the TemplateNotFound error in v1 api * Ceilometer-api is crashing due to pecan module missing * Clean class variable in compute manager test case * Update nova notifier test after nova change * Fix documentation formatting issues * Simplify ceilometer-api and checks Keystone middleware parsing * Fix nova conf compute_manager unavailable * Rename run_tests.sh to wrap_nosetests.sh * Update openstack.common * Corrected get_raw_event() in sqlalchemy * 
Higher level test for db backends * Remove useless imports * Flatten the v2 API * Update v2 API for WSME code reorg * Update WebOb version specification * Remove the ImageSizePollster * Add Kwapi pollster (energy monitoring) * Fixes a minor documentation typo * Peg the version of Ming used in tests * Update pep8 to 1.3.3 * Remove leftover useless import * Enhance policy test for init() * Provide the meters unit's in /meters * Fix keystoneclient auth_token middleware changes * policy: fix policy_file finding * Remove the _initialize_config_options * Add pyflakes * Make the v2 API date query parameters consistent * Fix test blocking issue and pin docutils version * Apply the official OpenStack stylesheets and templates to the Doc build * Fixed erroneous source filter in SQLAlchemy * Fix warnings in the documentation build * Handle finish and revert resize notifications * Add support for Folsom version of Swift * Implement user-api * Add support for Swift incoming/outgoing trafic metering * Pass a dict configuration file to auth_keystone * Import only once in nova_notifier * Fix MySQL charset error * Use default configuration file to make test data * Fix Glance control exchange * Move back api-v1 to the main api * Fix WSME arguments handling change * Remove useless gettext call in sql engine * Ground work for transifex-ify ceilometer * Add instance_type information to NetPollster * Fix dbsync API change * Fix image_id in instance resource metadata * Instantiate inspector in compute manager * remove direct nova db access from ceilometer * Make debugging the wsme app a bit easier * Implements database upgrade as storage engine independent * Fix the v1 api importing of acl * Add the ability to filter on metadata * Virt inspector directly layered over hypervisor API * Move meter.py into collector directory * Change mysql schema from latin1 to utf8 * Change default os-username to 'ceilometer' * Restore some metadata to the events and resources * Update documentation URL * Add sql db option to devstack for ceilometer * Remove debug print in V2 API * Start updating documentation for V2 API * Implement V2 API with Pecan and WSME * Move v1 API files into a subdirectory * Add test storage driver * Implement /meters to make discovery "nicer" from the client * Fix sqlalchemy for show_data and v1 web api * Implement object store metering * Make Impl of mongodb and sqlalchemy consistent * add migration migrate.cfg file to the python package * Fixes to enable the jenkins doc job to work * Lower the minimum required version of anyjson * Fix blocking test for nova notifier * network: remove left-over useless nova import * tools: set novaclient minimum version * libvirt: fix Folsom compatibility * Lower pymongo dependency * Remove rickshaw subproject * Remove unused rpc import * Adapted to nova's compute_driver moving * doc: fix cpu counter unit * tools: use tarballs rather than git for Folsom tests * Used auth_token middleware from keystoneclient * Remove cinderclient dependency * Fix latest nova changes * api: replace minified files by complete version * Add Folsom tests to tox * Handle nova.flags removal * Provide default configuration file * Fix mysql_engine option type * Remove nova.flags usage * api: add support for timestamp in _list_resources() * api: add timestamp interval support in _list_events() * tests: simplify api list_resources * Update openstack.common(except policy) * Adopted the oslo's rpc.Service change * Use libvirt num_cpu for CPU utilization calculation * Remove obsolete reference to 
instance.vcpus * Change references of /etc/ceilometer-{agent,collector}.conf to /etc/ceilometer/ceilometer.conf * Determine instance cores from public flavors API * Determine flavor type from the public nova API * Add comment about folsom compatibility change * Add keystone requirement for doc build * Avoid TypeError when loading libvirt.LibvirtDriver * Don't re-import flags and do parse_args instead of flags.FLAGS() * doc: rename stackforge to openstack * Fix pymongo requirements * Update .gitreview for openstack * Update use of nova config to work with folsom * compute: remove get_disks work-around * Use openstack versioning * Fix documentation build * document utc naive timestamp * Remove database access from agent pollsters * Fix merge error in central/manager.py * Fix nova config parsing * pollster trap error due to zero floating ip * Use the service.py in openstack-common * Allow no configured sources, provide a default file * Add service.py from openstack-common * Update common (except policy) * nova fake libvirt library breaking tests * Move db access out into a seperate file * Remove invalid fixme comments * Add new cpu_util meter recording CPU utilization % * Fix TypeError from old-style publish_counter calls * Fix auth middleware configuration * pin sqlalchemy to 0.7.x but not specifically 0.7.8 * add mongo index names * set tox to ignore global packages * Provide a way to disable some plugins * Use stevedore to load all plugins * implement get_volume_max for sqlalchemy * Add basic text/html renderer * network: floating IP account in Quantum * add unit test for CPUPollster * Clean up context usage * Add dependencies on clients used by pollsters * add ceilometer-send-counter * Update openstack.common.cfg * Fix tests broken by API change with Counter class * api: add source detail retrieval * Set source at publish time * Instance pollster emits instance. 
meter * timestamp columns in sqlalchemy not timezone aware * Remove obsolete/incorrect install instructions * network: emit router meter * Fix sqlalchemy performance problem * Added a working release-bugs.py script to tools/ * Change default API port * sqlalchemy record_meter merge objs not string * Use glance public API as opposed to registry API * Add OpenStack trove classifier for PyPI * bump version number to 0.2 * Nova libvirt release note * Update metadata for PyPI registration * tox: add missing venv * Fixes a couple typos * Counter renaming * Set correct timestamp on floatingip counter * Fix API change in make_test_data.py * Fix Nova URL in doc * Some more doc fixes * Ignore instances in the ERROR state * Use the right version number in documentation * doc: fix network.*.* resource id * image: handle glance delete notifications * image: handle glance upload notifications * image: add update event, fix ImageServe owner * network: fix create/update counter type & doc * Assorted doc fixes * add max/sum project volume and fix tests * Add general options * compute.libvirt: split read/write counters * API: add Keystone ACL and policy support * Add documentation for configuration options * network: do not emit counter on exists event, fix resource id * Move net function in class method and fix instance id * Prime counter table * Fix the configuration for the nova notifier * Initialize the control_exchange setting * Set version 0.1 * Make the instance counters use the same type * Restore manual install documentation * add quantum release note * Add release notes to docs * Update readme and create release notes * Remove duration field in Counter * Add counter for number of packets per vif * Move instance counter into its own pollster * Add a request counter for instance I/O * Rename instance disk I/O counter * Rename instances network counters * Use constant rather than string from counter type * Update the architecture diagram * Increase default polling interval * Fix compute agent publishing call * network: listen for Quantum exists event * Correct requirements filename * Fix notification subscription logic * Fix quantum notification subscriptions * Split meter publishing from the global config obj * network: add counter for actions * network: listen for Quantum notifications * Rename absolute to gauge * Fix typo in control exchanges help texts * Rework RPC notification mechanism * Update packaging files * Update URL list * Update openstack.common * Add volume/sum API endpoint for resource meters * Add resource volume/max api call * Fix dependency on anyjson * Listen for volume.delete.start instead of end * implement sqlalchemy dbengine backend * Add a notification handler for image downloads * Allow glance pollster tests to run * Create tox env definition for using a live db * Picking up dependencies from pip-requires file * Specify a new queue in manager * Rework RPC connection * Stop using nova's rpc module * Add configuration script to turn on notifications * Pep8 fixes, implement pep8 check on tests subdir * Use standard CLI options & env vars for creds * compute: remove get_metadata_from_event() * Listen for volume notifications * Add pollster for Glance * Fix Nova notifier test case * Fix nova flag parsing * Add nova_notifier notification driver for nova * Split instance polling code * Use stevedore to load storage engine drivers * Implement duration calculation API * Create tool for generating test meter data * Update openstack-common code to latest * Add bin/ceilometer-api for 
convenience * Add local copy of architecture diagram * Add timestamp parameters to the API docs * Check for doc build dependency before building * Pollster for network internal traffic (n1,n2) * Fix PEP8 issues * Add archicture diagram to documentation * added mongodb auth * Change timestamp management for resources * Log the instance causing the error when a pollster fails * Document how to install with devstack * Remove test skipping logic * Remove dependency on nova test modules * Add date range parameters to resource API * Add setuptools-git support * Add separate notification handler for instance flavor * Change instance meter type * Split the existing notification handlers up * Remove redundancy in the API * Separate the tox coverage test setup from py27 * Do not require user or project argument for event query * Add pymongo dependency for readthedocs.org build * Update openstack.common * Add API documentation * Be explicit about test dir * Add list projects API * Sort list of users and projects returned from queries * Add project arg to event and resource queries * Fix "meter" literal in event list API * collector exception on record_metering_data * Add API endpoint for listing raw event data * Change compute pollster API to work on one instance at a time * Create "central" agent * Skeleton for API server * fix use of source value in mongdb driver * Add {root,ephemeral}_disk_size counters * Implements vcpus counter * Fix nova configuration loading * Implements memory counter * Fix and document counter types * Check compute driver using new flag * Add openstack.common.{context,notifier,log} and update .rpc * Update review server link * Add link to roadmap * Add indexes to MongoDB driver * extend developer documentation * Reset the correct nova dependency URL * Switch .gitreview to use OpenStack gerrit * Add MongoDB engine * Convert timestamps to datetime objects before storing * Reduce complexity of storage engine API * Remove usage of nova.log * Documentation edits: * fix typo in instance properties list * Add Sphinx wrapper around existing docs * Configure nova.flags as well as openstack.common.cfg * First draft of plugin/agent documentation. 
Fixes bug 1018311 * Essex: update Nova to 2012.1.1, add python-novaclient * Split service preparation, periodic interval configurable * Use the same instance metadata everywhere * Emit meter event for instance "exists" * Start defining DB engine API * Fallback on nova.rpc for Essex * Add instance metadata from notification events * Combined fix to get past broken state of repo * Add more metadata to instance counter * Register storage options on import * Add Essex tests * log more than ceilometer * Remove event_type field from meter messages * fix message signatures for nested dicts * Remove nova.flags usage * Copy openstack.common.cfg * check message signatures in the collector * Sketch out a plugin system for saving metering data * refactor meter event publishing code * Add and use ceilometer own log module * add counter type field * Use timestamp instead of datetime when creating Counter * Use new flag API * Fix a PEP8 error * Make the stand-alone test script mimic tox * Remove unneeded eventlet test requirement * Add listeners for other instance-related events * Add tox configuration * Use openstack.common.cfg for ceilometer options * Publish and receive metering messages * Add floating IP pollster * Fix tests based on DB by importing nova.tests * make the pollsters in the agent plugins * Build ceilometer-agent and ceilometer-collector * Add plugin support to the notification portion of the collector daemon * Add CPU time fetching * Add an example function for converting a nova notification to a counter * add a tool for recording notifications and replaying them * Add an exception handler to deal with errors that occur when the info in nova is out of sync with reality (as on my currently broken system). Also adds a nova prefix to the logger for now so messages from this module make it into the log file * Periodically fetch for disk io stats * Use nova.service, add a manager class * Change license to Apache 2.0 * Add setup.py * Import ceilometer-nova-compute * Ignore pyc files * Add link to blueprint * Add .gitreview file * initial commit aodh-2.0.6/.testr.conf0000664000567000056710000000072313076064371015672 0ustar jenkinsjenkins00000000000000[DEFAULT] test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-600} \ ${PYTHON:-python} -m subunit.run discover ${OS_TEST_PATH:-./aodh/tests} -t . $LISTOPT $IDOPTION test_id_option=--load-list $IDFILE test_list_option=--list # NOTE(chdent): Only used/matches on gabbi-related tests. group_regex=(gabbi\.(suitemaker|driver)\.test_gabbi_([^_]+))_ aodh-2.0.6/README.rst0000664000567000056710000000040413076064372015270 0ustar jenkinsjenkins00000000000000aodh ==== Release notes can be read online at: http://docs.openstack.org/developer/aodh/releasenotes/index.html Documentation for the project can be found at: http://docs.openstack.org/developer/aodh/ The project home is at: http://launchpad.net/aodh aodh-2.0.6/setup.py0000664000567000056710000000177713076064371015330 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr'], pbr=True) aodh-2.0.6/run-functional-tests.sh0000775000567000056710000000031713076064372020247 0ustar jenkinsjenkins00000000000000#!/bin/bash -x set -e case $AODH_TEST_BACKEND in hbase) export AODH_TEST_STORAGE_URL="hbase://__test__" ;; *) source $(which overtest) $AODH_TEST_BACKEND ;; esac $* aodh-2.0.6/LICENSE0000664000567000056710000002363713076064371014622 0ustar jenkinsjenkins00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. aodh-2.0.6/.mailmap0000664000567000056710000000370613076064371015231 0ustar jenkinsjenkins00000000000000# Format is: # # Adam Gandelman Alan Pevec Alexei Kornienko ChangBo Guo(gcb) Chang Bo Guo Chinmaya Bharadwaj chinmay Clark Boylan Doug Hellmann Fei Long Wang Fengqian Gao Fengqian Fengqian Gao Fengqian.Gao Gordon Chung gordon chung Gordon Chung Gordon Chung Gordon Chung gordon chung Ildiko Vancsa Ildiko John H. Tran John Tran Julien Danjou LiuSheng liu-sheng Mehdi Abaakouk Nejc Saje Nejc Saje Nicolas Barcet (nijaba) Pádraig Brady Rich Bowen Sandy Walsh Sascha Peilicke Sean Dague Shengjie Min shengjie-min Shuangtai Tian shuangtai Swann Croiset ZhiQiang Fan aodh-2.0.6/aodh-config-generator.conf0000664000567000056710000000045213076064372020615 0ustar jenkinsjenkins00000000000000[DEFAULT] output_file = etc/aodh/aodh.conf wrap_width = 79 namespace = aodh namespace = aodh-auth namespace = oslo.db namespace = oslo.log namespace = oslo.messaging namespace = oslo.middleware.cors namespace = oslo.policy namespace = oslo.service.service namespace = keystonemiddleware.auth_token aodh-2.0.6/MAINTAINERS0000664000567000056710000000066413076064372015306 0ustar jenkinsjenkins00000000000000= Generalist Code Reviewers = The current members of aodh-core are listed here: https://launchpad.net/~aodh-drivers/+members#active This group can +2 and approve patches in aodh. However, they may choose to seek feedback from the appropriate specialist maintainer before approving a patch if it is in any way controversial or risky. 
= IRC handles of maintainers = cdent gordc ildikov jd__ liusheng llu _nadya_ r-mibu sileht zqfan aodh-2.0.6/PKG-INFO0000664000567000056710000000201413076064720014672 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: aodh Version: 2.0.6 Summary: OpenStack Telemetry Alarming Home-page: http://www.openstack.org/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: aodh ==== Release notes can be read online at: http://docs.openstack.org/developer/aodh/releasenotes/index.html Documentation for the project can be found at: http://docs.openstack.org/developer/aodh/ The project home is at: http://launchpad.net/aodh Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Topic :: System :: Monitoring aodh-2.0.6/setup.cfg0000664000567000056710000001054413076064720015425 0ustar jenkinsjenkins00000000000000[metadata] name = aodh summary = OpenStack Telemetry Alarming description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = http://www.openstack.org/ classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Topic :: System :: Monitoring [global] setup-hooks = pbr.hooks.setup_hook [files] packages = aodh data_files = etc/aodh = etc/aodh/* [extras] mysql = SQLAlchemy<1.1.0,>=0.9.7 alembic>=0.7.2 PyMySQL>=0.6.2 # MIT License postgresql = SQLAlchemy<1.1.0,>=0.9.7 alembic>=0.7.2 psycopg2 mongodb = pymongo>=3.0.2 hbase = happybase!=0.7,>=0.5,<1.0.0:python_version=='2.7' # Required for bson pymongo>=3.0.2 doc = oslosphinx>=2.5.0 # Apache-2.0 reno>=0.1.1 # Apache2 sphinx!=1.2.0,!=1.3b1,<1.3,>=1.1.2 sphinxcontrib-httpdomain sphinxcontrib-pecanwsme>=0.8 test = overtest>=0.7.0 oslotest>=1.5.1 # Apache-2.0 coverage>=3.6 fixtures>=1.3.1 mock>=1.0 testrepository>=0.0.18 testresources>=0.2.4 # Apache-2.0/BSD testtools>=1.4.0 gabbi>=0.12.0 # Apache-2.0 # Provides subunit-trace tempest-lib>=0.6.1 python-subunit>=0.0.18 [entry_points] aodh.storage = log = aodh.storage.impl_log:Connection mongodb = aodh.storage.impl_mongodb:Connection mysql = aodh.storage.impl_sqlalchemy:Connection mysql+pymysql = aodh.storage.impl_sqlalchemy:Connection postgresql = aodh.storage.impl_sqlalchemy:Connection sqlite = aodh.storage.impl_sqlalchemy:Connection hbase = aodh.storage.impl_hbase:Connection aodh.alarm.rule = threshold = aodh.api.controllers.v2.alarm_rules.threshold:AlarmThresholdRule combination = aodh.api.controllers.v2.alarm_rules.combination:AlarmCombinationRule gnocchi_resources_threshold = aodh.api.controllers.v2.alarm_rules.gnocchi:MetricOfResourceRule gnocchi_aggregation_by_metrics_threshold = aodh.api.controllers.v2.alarm_rules.gnocchi:AggregationMetricsByIdLookupRule gnocchi_aggregation_by_resources_threshold = aodh.api.controllers.v2.alarm_rules.gnocchi:AggregationMetricByResourcesLookupRule event = aodh.api.controllers.v2.alarm_rules.event:AlarmEventRule composite = 
aodh.api.controllers.v2.alarm_rules.composite:composite_rule aodh.evaluator = threshold = aodh.evaluator.threshold:ThresholdEvaluator combination = aodh.evaluator.combination:CombinationEvaluator gnocchi_resources_threshold = aodh.evaluator.gnocchi:GnocchiResourceThresholdEvaluator gnocchi_aggregation_by_metrics_threshold = aodh.evaluator.gnocchi:GnocchiAggregationMetricsThresholdEvaluator gnocchi_aggregation_by_resources_threshold = aodh.evaluator.gnocchi:GnocchiAggregationResourcesThresholdEvaluator composite = aodh.evaluator.composite:CompositeEvaluator aodh.notifier = log = aodh.notifier.log:LogAlarmNotifier test = aodh.notifier.test:TestAlarmNotifier http = aodh.notifier.rest:RestAlarmNotifier https = aodh.notifier.rest:RestAlarmNotifier trust+http = aodh.notifier.trust:TrustRestAlarmNotifier trust+https = aodh.notifier.trust:TrustRestAlarmNotifier zaqar = aodh.notifier.zaqar:ZaqarAlarmNotifier console_scripts = aodh-api = aodh.cmd.api:main aodh-dbsync = aodh.cmd.storage:dbsync aodh-expirer = aodh.cmd.storage:expirer aodh-evaluator = aodh.cmd.alarm:evaluator aodh-notifier = aodh.cmd.alarm:notifier aodh-listener = aodh.cmd.alarm:listener aodh-data-migration = aodh.cmd.data_migration:main oslo.config.opts = aodh = aodh.opts:list_opts aodh-auth = aodh.opts:list_keystoneauth_opts oslo.config.opts.defaults = aodh = aodh.conf.defaults:set_cors_middleware_defaults keystoneauth1.plugin = password-aodh-legacy = aodh.keystone_client:LegacyAodhKeystoneLoader tempest.test_plugins = aodh_tests = aodh.tests.tempest.plugin:AodhTempestPlugin [build_sphinx] all_files = 1 build-dir = doc/build source-dir = doc/source [pbr] warnerrors = true autodoc_index_modules = true autodoc_exclude_modules = aodh.storage.sqlalchemy.alembic.* [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext mapping_file = babel.cfg output_file = aodh/locale/aodh.pot [compile_catalog] directory = aodh/locale domain = aodh [update_catalog] domain = aodh output_dir = aodh/locale input_file = aodh/locale/aodh.pot [egg_info] tag_build = tag_date = 0 aodh-2.0.6/devstack/0000775000567000056710000000000013076064720015404 5ustar jenkinsjenkins00000000000000aodh-2.0.6/devstack/settings0000664000567000056710000000227013076064372017173 0ustar jenkinsjenkins00000000000000# turn on all the aodh services by default # API service enable_service aodh-api # Alarming enable_service aodh-notifier aodh-evaluator # Listener for Event Alarming enable_service aodh-listener # Default directories AODH_DIR=$DEST/aodh AODH_CONF_DIR=/etc/aodh AODH_CONF=$AODH_CONF_DIR/aodh.conf AODH_API_LOG_DIR=/var/log/aodh-api AODH_AUTH_CACHE_DIR=${AODH_AUTH_CACHE_DIR:-/var/cache/aodh} AODH_WSGI_DIR=${AODH_WSGI_DIR:-/var/www/aodh} # Set up database backend AODH_BACKEND=${AODH_BACKEND:-mysql} # Aodh connection info. AODH_SERVICE_PROTOCOL=http AODH_SERVICE_HOST=$SERVICE_HOST AODH_SERVICE_PORT=${AODH_SERVICE_PORT:-8042} AODH_USE_MOD_WSGI=${AODH_USE_MOD_WSGI:-${ENABLE_HTTPD_MOD_WSGI_SERVICES}} # To enable OSprofiler change value of this variable to "notifications,profiler" AODH_NOTIFICATION_TOPICS=${AODH_NOTIFICATION_TOPICS:-notifications} AODH_COORDINATION_URL=${AODH_COORDINATION_URL:-} # Tell Tempest this project is present TEMPEST_SERVICES+=,aodh # Set up default directories for client and middleware GITDIR["python-ceilometerclient"]=$DEST/python-ceilometerclient # Get rid of this before done. 
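# Illustrative sketch only (not part of the original settings file): a minimal
# local.conf fragment showing how the variables defined above are typically
# overridden when the plugin is enabled. The enable_plugin line mirrors the
# example in devstack/plugin.sh; the backend and coordination values below are
# example assumptions, not project defaults.
#
# [[local|localrc]]
# enable_plugin aodh git://git.openstack.org/openstack/aodh
# AODH_BACKEND=postgresql
# AODH_COORDINATION_URL=redis://localhost:6379
# AODH_USE_MOD_WSGI=True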
# Tell emacs to use shell-script-mode ## Local variables: ## mode: shell-script ## End: aodh-2.0.6/devstack/gate/0000775000567000056710000000000013076064720016324 5ustar jenkinsjenkins00000000000000aodh-2.0.6/devstack/gate/gate_hook.sh0000775000567000056710000000250013076064372020623 0ustar jenkinsjenkins00000000000000#!/bin/bash # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # This script is executed inside gate_hook function in devstack gate. ENABLED_SERVICES="key,aodh-api,aodh-notifier,aodh-evaluator," ENABLED_SERVICES+="ceilometer-acompute,ceilometer-acentral,ceilometer-anotification," ENABLED_SERVICES+="ceilometer-collector,ceilometer-api," # The backend is passed in by the job as the first and only argument export AODH_BACKEND="${1}" export DEVSTACK_GATE_INSTALL_TESTONLY=1 export DEVSTACK_GATE_NO_SERVICES=1 export DEVSTACK_GATE_TEMPEST=0 export DEVSTACK_GATE_EXERCISES=0 export KEEP_LOCALRC=1 # default to mysql case $AODH_BACKEND in postgresql) export DEVSTACK_GATE_POSTGRES=1 ;; esac DEVSTACK_LOCAL_CONFIG+=$'\n'"export AODH_BACKEND=${AODH_BACKEND}" export ENABLED_SERVICES $BASE/new/devstack-gate/devstack-vm-gate.sh aodh-2.0.6/devstack/plugin.sh0000664000567000056710000002461413076064372017250 0ustar jenkinsjenkins00000000000000# Install and start **Aodh** service in devstack # # To enable Aodh in devstack add an entry to local.conf that # looks like # # [[local|localrc]] # enable_plugin aodh git://git.openstack.org/openstack/aodh # # By default all aodh services are started (see # devstack/settings). # # AODH_BACKEND: Database backend (e.g. 'mysql', 'mongodb') # AODH_COORDINATION_URL: URL for group membership service provided by tooz. # Support potential entry-points console scripts in VENV or not if [[ ${USE_VENV} = True ]]; then PROJECT_VENV["aodh"]=${AODH_DIR}.venv AODH_BIN_DIR=${PROJECT_VENV["aodh"]}/bin else AODH_BIN_DIR=$(get_python_exec_prefix) fi # Test if any Aodh services are enabled # is_aodh_enabled function is_aodh_enabled { [[ ,${ENABLED_SERVICES} =~ ,"aodh-" ]] && return 0 return 1 } function aodh_service_url { echo "$AODH_SERVICE_PROTOCOL://$AODH_SERVICE_HOST:$AODH_SERVICE_PORT" } # _aodh_install_mongodb() - Install mongodb and python lib. function _aodh_install_mongodb { # Server package is the same on all local packages=mongodb-server if is_fedora; then # mongodb client packages="${packages} mongodb" fi install_package ${packages} if is_fedora; then restart_service mongod else restart_service mongodb fi # give time for service to restart sleep 5 } # _aodh_install_redis() - Install the redis server and python lib.
function _aodh_install_redis { if is_ubuntu; then install_package redis-server restart_service redis-server else # This will fail (correctly) where a redis package is unavailable install_package redis restart_service redis fi pip_install_gr redis } # Configure mod_wsgi function _aodh_config_apache_wsgi { sudo mkdir -p $AODH_WSGI_DIR local aodh_apache_conf=$(apache_site_config_for aodh) local apache_version=$(get_apache_version) local venv_path="" # Copy proxy vhost and wsgi file sudo cp $AODH_DIR/aodh/api/app.wsgi $AODH_WSGI_DIR/app if [[ ${USE_VENV} = True ]]; then venv_path="python-path=${PROJECT_VENV["aodh"]}/lib/$(python_version)/site-packages" fi sudo cp $AODH_DIR/devstack/apache-aodh.template $aodh_apache_conf if [ "$AODH_BACKEND" = 'hbase' ] ; then # Use one process to have single in-memory DB instance for data consistency AODH_API_WORKERS=1 else AODH_API_WORKERS=$API_WORKERS fi sudo sed -e " s|%PORT%|$AODH_SERVICE_PORT|g; s|%APACHE_NAME%|$APACHE_NAME|g; s|%WSGIAPP%|$AODH_WSGI_DIR/app|g; s|%USER%|$STACK_USER|g; s|%APIWORKERS%|$AODH_API_WORKERS|g; s|%VIRTUALENV%|$venv_path|g " -i $aodh_apache_conf } # Install required services for coordination function _aodh_prepare_coordination { if echo $AODH_COORDINATION_URL | grep -q '^memcached:'; then install_package memcached elif echo $AODH_COORDINATION_URL | grep -q '^redis:'; then _aodh_install_redis fi } # Install required services for storage backends function _aodh_prepare_storage_backend { if [ "$AODH_BACKEND" = 'mongodb' ] ; then pip_install_gr pymongo _aodh_install_mongodb fi } # Create aodh related accounts in Keystone function _aodh_create_accounts { if is_service_enabled aodh-api; then create_service_user "aodh" "admin" local aodh_service=$(get_or_create_service "aodh" \ "alarming" "OpenStack Alarming Service") get_or_create_endpoint $aodh_service \ "$REGION_NAME" \ "$(aodh_service_url)" \ "$(aodh_service_url)" \ "$(aodh_service_url)" fi } # Activities to do before aodh has been installed. function preinstall_aodh { # Needed to build psycopg2 if is_ubuntu; then install_package libpq-dev else install_package postgresql-devel fi } # Remove WSGI files, disable and remove Apache vhost file function _aodh_cleanup_apache_wsgi { sudo rm -f $AODH_WSGI_DIR/* sudo rm -f $(apache_site_config_for aodh) } # cleanup_aodh() - Remove residual data files, anything left over # from previous runs that a clean run would need to clean up function cleanup_aodh { if [ "$AODH_BACKEND" = 'mongodb' ] ; then mongo aodh --eval "db.dropDatabase();" fi if [ "$AODH_USE_MOD_WSGI" == "True" ]; then _aodh_cleanup_apache_wsgi fi } # Set configuration for storage backend. 
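# A rough sketch of what _aodh_configure_storage_backend below ends up writing
# into $AODH_CONF for each backend; the mysql/postgresql value is illustrative
# (the real one comes from devstack's database_connection_url), the other two
# are literal:
#   mysql/postgresql: [database] connection = mysql+pymysql://root:<password>@127.0.0.1/aodh?charset=utf8
#   mongodb:          [database] connection = mongodb://localhost:27017/aodh
#   hbase:            [database] connection = hbase://__test__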
function _aodh_configure_storage_backend { if [ "$AODH_BACKEND" = 'mysql' ] || [ "$AODH_BACKEND" = 'postgresql' ] ; then iniset $AODH_CONF database connection $(database_connection_url aodh) elif [ "$AODH_BACKEND" = 'mongodb' ] ; then iniset $AODH_CONF database connection mongodb://localhost:27017/aodh cleanup_aodh elif [ "$AODH_BACKEND" = 'hbase' ] ; then iniset $AODH_CONF database connection hbase://__test__ else die $LINENO "Unable to configure unknown AODH_BACKEND $AODH_BACKEND" fi } # Configure Aodh function configure_aodh { iniset_rpc_backend aodh $AODH_CONF iniset $AODH_CONF DEFAULT notification_topics "$AODH_NOTIFICATION_TOPICS" iniset $AODH_CONF DEFAULT verbose True iniset $AODH_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" if [[ -n "$AODH_COORDINATION_URL" ]]; then iniset $AODH_CONF coordination backend_url $AODH_COORDINATION_URL fi # Install the policy file for the API server cp $AODH_DIR/etc/aodh/policy.json $AODH_CONF_DIR iniset $AODH_CONF oslo_policy policy_file $AODH_CONF_DIR/policy.json cp $AODH_DIR/etc/aodh/api_paste.ini $AODH_CONF_DIR # The alarm evaluator needs these options to call gnocchi/ceilometer APIs iniset $AODH_CONF service_credentials auth_type password iniset $AODH_CONF service_credentials username aodh iniset $AODH_CONF service_credentials user_domain_id default iniset $AODH_CONF service_credentials project_domain_id default iniset $AODH_CONF service_credentials password $SERVICE_PASSWORD iniset $AODH_CONF service_credentials project_name $SERVICE_PROJECT_NAME iniset $AODH_CONF service_credentials region_name $REGION_NAME iniset $AODH_CONF service_credentials auth_url $KEYSTONE_SERVICE_URI configure_auth_token_middleware $AODH_CONF aodh $AODH_AUTH_CACHE_DIR iniset $AODH_CONF notification store_events $AODH_EVENTS # Configure the storage backend _aodh_configure_storage_backend # NOTE: This must come after database configuration as those can # call cleanup_aodh which will wipe the WSGI config. if [ "$AODH_USE_MOD_WSGI" == "True" ]; then iniset $AODH_CONF api pecan_debug "False" _aodh_config_apache_wsgi fi if is_service_enabled gnocchi-api; then iniset $AODH_CONF DEFAULT gnocchi_url $(gnocchi_service_url) fi } # init_aodh() - Initialize etc. function init_aodh { # Get aodh keystone settings in place _aodh_create_accounts # Create cache dir sudo install -d -o $STACK_USER $AODH_AUTH_CACHE_DIR rm -f $AODH_AUTH_CACHE_DIR/* if is_service_enabled mysql postgresql; then if [ "$AODH_BACKEND" = 'mysql' ] || [ "$AODH_BACKEND" = 'postgresql' ] ; then recreate_database aodh $AODH_BIN_DIR/aodh-dbsync fi fi } # Install Aodh. # The storage and coordination backends are installed here because the # virtualenv context is active at this point and python drivers need to be # installed. The context is not active during preinstall (when it would # otherwise make sense to set up the backend services).
function install_aodh { _aodh_prepare_coordination _aodh_prepare_storage_backend install_aodhclient sudo -H pip install -e "$AODH_DIR"[test,$AODH_BACKEND] sudo install -d -o $STACK_USER -m 755 $AODH_CONF_DIR $AODH_API_LOG_DIR } # install_aodhclient() - Collect source and prepare function install_aodhclient { if use_library_from_git "python-ceilometerclient"; then git_clone_by_name "python-ceilometerclient" setup_dev_lib "python-ceilometerclient" sudo install -D -m 0644 -o $STACK_USER {${GITDIR["python-ceilometerclient"]}/tools/,/etc/bash_completion.d/}ceilometer.bash_completion else pip_install_gr python-ceilometerclient fi } # start_aodh() - Start running processes, including screen function start_aodh { if [[ "$AODH_USE_MOD_WSGI" == "False" ]]; then run_process aodh-api "$AODH_BIN_DIR/aodh-api -d -v --log-dir=$AODH_API_LOG_DIR --config-file $AODH_CONF" else enable_apache_site aodh restart_apache_server tail_log aodh /var/log/$APACHE_NAME/aodh.log tail_log aodh-api /var/log/$APACHE_NAME/aodh_access.log fi # Only die on API if it was actually intended to be turned on if is_service_enabled aodh-api; then echo "Waiting for aodh-api to start..." if ! wait_for_service $SERVICE_TIMEOUT $(aodh_service_url)/v2/; then die $LINENO "aodh-api did not start" fi fi run_process aodh-notifier "$AODH_BIN_DIR/aodh-notifier --config-file $AODH_CONF" run_process aodh-evaluator "$AODH_BIN_DIR/aodh-evaluator --config-file $AODH_CONF" run_process aodh-listener "$AODH_BIN_DIR/aodh-listener --config-file $AODH_CONF" } # stop_aodh() - Stop running processes function stop_aodh { if [ "$AODH_USE_MOD_WSGI" == "True" ]; then disable_apache_site aodh restart_apache_server fi # Kill the aodh screen windows for serv in aodh-api aodh-notifier aodh-evaluator aodh-listener; do stop_process $serv done } # This is the main for plugin.sh if is_service_enabled aodh; then if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then # Set up other services echo_summary "Configuring system services for Aodh" preinstall_aodh elif [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing Aodh" # Use stack_install_service here to account for virtualenv stack_install_service aodh elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring Aodh" configure_aodh elif [[ "$1" == "stack" && "$2" == "extra" ]]; then echo_summary "Initializing Aodh" # Tidy base for aodh init_aodh # Start the services start_aodh fi if [[ "$1" == "unstack" ]]; then echo_summary "Shutting Down Aodh" stop_aodh fi if [[ "$1" == "clean" ]]; then echo_summary "Cleaning Aodh" cleanup_aodh fi fi aodh-2.0.6/devstack/README.rst0000664000567000056710000000055413076064372017102 0ustar jenkinsjenkins00000000000000========================= Enabling Aodh in DevStack ========================= 1. Download DevStack:: git clone https://git.openstack.org/openstack-dev/devstack.git cd devstack 2. Add this repo as an external repository in ``local.conf`` file:: [[local|localrc]] enable_plugin aodh https://git.openstack.org/openstack/aodh 3. Run ``stack.sh``.
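As a worked example of the knobs defined in ``devstack/settings`` and consumed by ``plugin.sh``, a slightly fuller ``local.conf`` entry than the one in step 2 might look like the following; the backend and coordination values are illustrative choices, not defaults you must use::

    [[local|localrc]]
    enable_plugin aodh https://git.openstack.org/openstack/aodh
    AODH_BACKEND=mysql
    AODH_COORDINATION_URL=redis://localhost:6379
    AODH_USE_MOD_WSGI=True

Once ``stack.sh`` finishes, ``start_aodh`` has already waited for ``$(aodh_service_url)/v2/`` (``http://$SERVICE_HOST:8042/v2/`` with the default port) to answer; hitting that URL by hand should return an HTTP response (typically ``401`` until a Keystone token is supplied) rather than a connection error.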
aodh-2.0.6/devstack/apache-aodh.template0000664000567000056710000000074513076064371021303 0ustar jenkinsjenkins00000000000000Listen %PORT% <VirtualHost *:%PORT%> WSGIDaemonProcess aodh-api processes=%APIWORKERS% threads=10 user=%USER% display-name=%{GROUP} %VIRTUALENV% WSGIProcessGroup aodh-api WSGIScriptAlias / %WSGIAPP% WSGIApplicationGroup %{GLOBAL} <IfVersion >= 2.4> ErrorLogFormat "%{cu}t %M" </IfVersion> ErrorLog /var/log/%APACHE_NAME%/aodh.log CustomLog /var/log/%APACHE_NAME%/aodh_access.log combined </VirtualHost> WSGISocketPrefix /var/run/%APACHE_NAME%
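To make the substitutions performed by ``_aodh_config_apache_wsgi`` concrete, here is a sketch of the rendered site file, assuming the default ``AODH_SERVICE_PORT=8042`` and ``AODH_WSGI_DIR=/var/www/aodh`` from ``devstack/settings``, Ubuntu's ``apache2`` naming, ``STACK_USER=stack``, four API workers, and no virtualenv path; all of these values are illustrative, not requirements::

    Listen 8042

    <VirtualHost *:8042>
        WSGIDaemonProcess aodh-api processes=4 threads=10 user=stack display-name=%{GROUP}
        WSGIProcessGroup aodh-api
        WSGIScriptAlias / /var/www/aodh/app
        WSGIApplicationGroup %{GLOBAL}
        <IfVersion >= 2.4>
            ErrorLogFormat "%{cu}t %M"
        </IfVersion>
        ErrorLog /var/log/apache2/aodh.log
        CustomLog /var/log/apache2/aodh_access.log combined
    </VirtualHost>

    WSGISocketPrefix /var/run/apache2

Tokens such as ``%{GROUP}``, ``%{GLOBAL}`` and ``%{cu}t`` are mod_wsgi/Apache directives rather than devstack placeholders, so the ``sed`` in ``_aodh_config_apache_wsgi`` leaves them untouched.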