osprofiler-1.15.2/0000775000175100017510000000000013241120161013777 5ustar zuulzuul00000000000000osprofiler-1.15.2/setup.py0000666000175100017510000000170313241117762015531 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools # In python < 2.7.4, a lazy loading of package `pbr` will break # setuptools if some other modules registered functions in `atexit`. # solution from: http://bugs.python.org/issue15881#msg170215 try: import multiprocessing # noqa except ImportError: pass setuptools.setup( setup_requires=['pbr>=1.8'], pbr=True) osprofiler-1.15.2/osprofiler.egg-info/0000775000175100017510000000000013241120161017655 5ustar zuulzuul00000000000000osprofiler-1.15.2/osprofiler.egg-info/not-zip-safe0000664000175100017510000000000113241120145022105 0ustar zuulzuul00000000000000 osprofiler-1.15.2/osprofiler.egg-info/dependency_links.txt0000664000175100017510000000000113241120160023722 0ustar zuulzuul00000000000000 osprofiler-1.15.2/osprofiler.egg-info/top_level.txt0000664000175100017510000000001313241120160022400 0ustar zuulzuul00000000000000osprofiler osprofiler-1.15.2/osprofiler.egg-info/SOURCES.txt0000664000175100017510000000600513241120161021542 0ustar zuulzuul00000000000000.testr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog LICENSE README.rst bindep.txt requirements.txt setup.cfg setup.py test-requirements.txt tox.ini devstack/README.rst devstack/plugin.sh devstack/settings devstack/lib/osprofiler doc/source/Makefile doc/source/conf.py doc/source/index.rst doc/source/user/api.rst doc/source/user/background.rst doc/source/user/collectors.rst doc/source/user/history.rst doc/source/user/index.rst doc/source/user/integration.rst doc/source/user/similar_projects.rst doc/specs/README.rst doc/specs/template.rst doc/specs/implemented/README.rst doc/specs/implemented/make_paste_ini_config_optional.rst doc/specs/implemented/multi_backend_support.rst doc/specs/in-progress/README.rst doc/specs/in-progress/better_devstack_integration.rst doc/specs/in-progress/integration_testing.rst osprofiler/__init__.py osprofiler/_utils.py osprofiler/exc.py osprofiler/initializer.py osprofiler/notifier.py osprofiler/opts.py osprofiler/profiler.py osprofiler/sqlalchemy.py osprofiler/web.py osprofiler.egg-info/PKG-INFO osprofiler.egg-info/SOURCES.txt osprofiler.egg-info/dependency_links.txt osprofiler.egg-info/entry_points.txt osprofiler.egg-info/not-zip-safe osprofiler.egg-info/pbr.json osprofiler.egg-info/requires.txt osprofiler.egg-info/top_level.txt osprofiler/cmd/__init__.py osprofiler/cmd/cliutils.py osprofiler/cmd/commands.py osprofiler/cmd/shell.py osprofiler/cmd/template.html osprofiler/drivers/__init__.py osprofiler/drivers/base.py osprofiler/drivers/ceilometer.py osprofiler/drivers/elasticsearch_driver.py osprofiler/drivers/loginsight.py osprofiler/drivers/messaging.py osprofiler/drivers/mongodb.py osprofiler/drivers/redis_driver.py osprofiler/hacking/__init__.py osprofiler/hacking/checks.py 
osprofiler/tests/__init__.py osprofiler/tests/test.py osprofiler/tests/functional/__init__.py osprofiler/tests/functional/config.cfg osprofiler/tests/functional/test_driver.py osprofiler/tests/unit/__init__.py osprofiler/tests/unit/test_initializer.py osprofiler/tests/unit/test_notifier.py osprofiler/tests/unit/test_opts.py osprofiler/tests/unit/test_profiler.py osprofiler/tests/unit/test_sqlalchemy.py osprofiler/tests/unit/test_utils.py osprofiler/tests/unit/test_web.py osprofiler/tests/unit/cmd/__init__.py osprofiler/tests/unit/cmd/test_shell.py osprofiler/tests/unit/doc/__init__.py osprofiler/tests/unit/doc/test_specs.py osprofiler/tests/unit/drivers/__init__.py osprofiler/tests/unit/drivers/test_base.py osprofiler/tests/unit/drivers/test_ceilometer.py osprofiler/tests/unit/drivers/test_elasticsearch.py osprofiler/tests/unit/drivers/test_loginsight.py osprofiler/tests/unit/drivers/test_messaging.py osprofiler/tests/unit/drivers/test_mongodb.py osprofiler/tests/unit/drivers/test_redis_driver.py releasenotes/notes/add-reno-996dd44974d53238.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder releasenotes/source/_templates/.placeholder tools/lint.py tools/patch_tox_venv.pyosprofiler-1.15.2/osprofiler.egg-info/entry_points.txt0000664000175100017510000000030013241120160023143 0ustar zuulzuul00000000000000[console_scripts] osprofiler = osprofiler.cmd.shell:main [oslo.config.opts] osprofiler = osprofiler.opts:list_opts [paste.filter_factory] osprofiler = osprofiler.web:WsgiMiddleware.factory osprofiler-1.15.2/osprofiler.egg-info/PKG-INFO0000664000175100017510000000443213241120160020754 0ustar zuulzuul00000000000000Metadata-Version: 1.1 Name: osprofiler Version: 1.15.2 Summary: OpenStack Profiler Library Home-page: https://docs.openstack.org/osprofiler/latest/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description-Content-Type: UNKNOWN Description: ======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/osprofiler.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on =========================================================== OSProfiler -- Library for cross-project profiling library =========================================================== .. image:: https://img.shields.io/pypi/v/osprofiler.svg :target: https://pypi.python.org/pypi/osprofiler/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/osprofiler.svg :target: https://pypi.python.org/pypi/osprofiler/ :alt: Downloads OSProfiler provides a tiny but powerful library that is used by most (soon to be all) OpenStack projects and their python clients. It provides functionality to be able to generate 1 trace per request, that goes through all involved services. This trace can then be extracted and used to build a tree of calls which can be quite handy for a variety of reasons (for example in isolating cross-project performance issues). 
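        A minimal sketch of the in-code API follows (hedged: the profiler has to be
        initialized with one of the configured HMAC keys before trace points are
        recorded, and the operation names below are placeholders)::

            from osprofiler import profiler

            profiler.init(hmac_key="SECRET_KEY")

            @profiler.trace("db-query")
            def load_items():
                pass

            with profiler.Trace("render", info={"template": "index.html"}):
                load_items()
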
* Free software: Apache license * Documentation: https://docs.openstack.org/osprofiler/latest/ * Source: https://git.openstack.org/cgit/openstack/osprofiler * Bugs: https://bugs.launchpad.net/osprofiler Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Developers Classifier: Intended Audience :: Information Technology Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3.5 osprofiler-1.15.2/osprofiler.egg-info/requires.txt0000664000175100017510000000021713241120160022254 0ustar zuulzuul00000000000000six>=1.9.0 oslo.utils>=3.16.0 WebOb>=1.6.0 requests>=2.10.0 netaddr!=0.7.16,>=0.7.13 oslo.concurrency>=3.8.0 [oslo_config] oslo.config>=3.2.0 osprofiler-1.15.2/osprofiler.egg-info/pbr.json0000664000175100017510000000005613241120160021333 0ustar zuulzuul00000000000000{"git_version": "2eef344", "is_release": true}osprofiler-1.15.2/requirements.txt0000666000175100017510000000024713241120010017261 0ustar zuulzuul00000000000000six>=1.9.0 # MIT oslo.utils>=3.16.0 # Apache-2.0 WebOb>=1.6.0 # MIT requests>=2.10.0 # Apache-2.0 netaddr>=0.7.13,!=0.7.16 # BSD oslo.concurrency>=3.8.0 # Apache-2.0 osprofiler-1.15.2/.zuul.yaml0000666000175100017510000000035513241120010015736 0ustar zuulzuul00000000000000- project: name: openstack/osprofiler check: jobs: - openstack-tox-functional - openstack-tox-functional-py35 gate: jobs: - openstack-tox-functional - openstack-tox-functional-py35 osprofiler-1.15.2/osprofiler/0000775000175100017510000000000013241120161016163 5ustar zuulzuul00000000000000osprofiler-1.15.2/osprofiler/web.py0000666000175100017510000001067213241117762017337 0ustar zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import six import webob.dec from osprofiler import _utils as utils from osprofiler import profiler # Trace keys that are required or optional, any other # keys that are present will cause the trace to be rejected... _REQUIRED_KEYS = ("base_id", "hmac_key") _OPTIONAL_KEYS = ("parent_id",) #: Http header that will contain the needed traces data. X_TRACE_INFO = "X-Trace-Info" #: Http header that will contain the traces data hmac (that will be validated). X_TRACE_HMAC = "X-Trace-HMAC" def get_trace_id_headers(): """Adds the trace id headers (and any hmac) into provided dictionary.""" p = profiler.get() if p and p.hmac_key: data = {"base_id": p.get_base_id(), "parent_id": p.get_id()} pack = utils.signed_pack(data, p.hmac_key) return { X_TRACE_INFO: pack[0], X_TRACE_HMAC: pack[1] } return {} _ENABLED = None _HMAC_KEYS = None def disable(): """Disable middleware. This is the alternative way to disable middleware. It will be used to be able to disable middleware via oslo.config. 
""" global _ENABLED _ENABLED = False def enable(hmac_keys=None): """Enable middleware.""" global _ENABLED, _HMAC_KEYS _ENABLED = True _HMAC_KEYS = utils.split(hmac_keys or "") class WsgiMiddleware(object): """WSGI Middleware that enables tracing for an application.""" def __init__(self, application, hmac_keys=None, enabled=False, **kwargs): """Initialize middleware with api-paste.ini arguments. :application: wsgi app :hmac_keys: Only trace header that was signed with one of these hmac keys will be processed. This limitation is essential, because it allows to profile OpenStack by only those who knows this key which helps avoid DDOS. :enabled: This middleware can be turned off fully if enabled is False. :kwargs: Other keyword arguments. NOTE(tovin07): Currently, this `kwargs` is not used at all. It's here to avoid some extra keyword arguments in local_conf that cause `__init__() got an unexpected keyword argument`. """ self.application = application self.name = "wsgi" self.enabled = enabled self.hmac_keys = utils.split(hmac_keys or "") @classmethod def factory(cls, global_conf, **local_conf): def filter_(app): return cls(app, **local_conf) return filter_ def _trace_is_valid(self, trace_info): if not isinstance(trace_info, dict): return False trace_keys = set(six.iterkeys(trace_info)) if not all(k in trace_keys for k in _REQUIRED_KEYS): return False if trace_keys.difference(_REQUIRED_KEYS + _OPTIONAL_KEYS): return False return True @webob.dec.wsgify def __call__(self, request): if (_ENABLED is not None and not _ENABLED or _ENABLED is None and not self.enabled): return request.get_response(self.application) trace_info = utils.signed_unpack(request.headers.get(X_TRACE_INFO), request.headers.get(X_TRACE_HMAC), _HMAC_KEYS or self.hmac_keys) if not self._trace_is_valid(trace_info): return request.get_response(self.application) profiler.init(**trace_info) info = { "request": { "path": request.path, "query": request.query_string, "method": request.method, "scheme": request.scheme } } try: with profiler.Trace(self.name, info=info): return request.get_response(self.application) finally: profiler._clean() osprofiler-1.15.2/osprofiler/_utils.py0000666000175100017510000001176413241117762020064 0ustar zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import base64 import hashlib import hmac import json import os from oslo_utils import secretutils import six def split(text, strip=True): """Splits a comma separated text blob into its components. Does nothing if already a list or tuple. """ if isinstance(text, (tuple, list)): return text if not isinstance(text, six.string_types): raise TypeError("Unknown how to split '%s': %s" % (text, type(text))) if strip: return [t.strip() for t in text.split(",") if t.strip()] else: return text.split(",") def binary_encode(text, encoding="utf-8"): """Converts a string of into a binary type using given encoding. Does nothing if text not unicode string. 
""" if isinstance(text, six.binary_type): return text elif isinstance(text, six.text_type): return text.encode(encoding) else: raise TypeError("Expected binary or string type") def binary_decode(data, encoding="utf-8"): """Converts a binary type into a text type using given encoding. Does nothing if data is already unicode string. """ if isinstance(data, six.binary_type): return data.decode(encoding) elif isinstance(data, six.text_type): return data else: raise TypeError("Expected binary or string type") def generate_hmac(data, hmac_key): """Generate a hmac using a known key given the provided content.""" h = hmac.new(binary_encode(hmac_key), digestmod=hashlib.sha1) h.update(binary_encode(data)) return h.hexdigest() def signed_pack(data, hmac_key): """Pack and sign data with hmac_key.""" raw_data = base64.urlsafe_b64encode(binary_encode(json.dumps(data))) # NOTE(boris-42): Don't generate_hmac if there is no hmac_key, mostly # security reason, we shouldn't allow to use WsgiMiddleware # without hmac_key, cause everybody will be able to trigger # profiler and organize DDOS. return raw_data, generate_hmac(raw_data, hmac_key) if hmac_key else None def signed_unpack(data, hmac_data, hmac_keys): """Unpack data and check that it was signed with hmac_key. :param data: json string that was singed_packed. :param hmac_data: hmac data that was generated from json by hmac_key on user side :param hmac_keys: server side hmac_keys, one of these should be the same as user used to sign with :returns: None in case of something wrong, Object in case of everything OK. """ # NOTE(boris-42): For security reason, if there is no hmac_data or # hmac_keys we don't trust data => return None. if not (hmac_keys and hmac_data): return None hmac_data = hmac_data.strip() if not hmac_data: return None for hmac_key in hmac_keys: try: user_hmac_data = generate_hmac(data, hmac_key) except Exception: # nosec pass else: if secretutils.constant_time_compare(hmac_data, user_hmac_data): try: contents = json.loads( binary_decode(base64.urlsafe_b64decode(data))) contents["hmac_key"] = hmac_key return contents except Exception: return None return None def itersubclasses(cls, _seen=None): """Generator over all subclasses of a given class in depth first order.""" _seen = _seen or set() try: subs = cls.__subclasses__() except TypeError: # fails only when cls is type subs = cls.__subclasses__(cls) for sub in subs: if sub not in _seen: _seen.add(sub) yield sub for sub in itersubclasses(sub, _seen): yield sub def import_modules_from_package(package): """Import modules from package and append into sys.modules :param: package - Full package name. For example: rally.deploy.engines """ path = [os.path.dirname(__file__), ".."] + package.split(".") path = os.path.join(*path) for root, dirs, files in os.walk(path): for filename in files: if filename.startswith("__") or not filename.endswith(".py"): continue new_package = ".".join(root.split(os.sep)).split("....")[1] module_name = "%s.%s" % (new_package, filename[:-3]) __import__(module_name) osprofiler-1.15.2/osprofiler/__init__.py0000666000175100017510000000132613241117762020315 0ustar zuulzuul00000000000000# Copyright 2011 OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pkg_resources __version__ = pkg_resources.get_distribution("osprofiler").version osprofiler-1.15.2/osprofiler/initializer.py0000666000175100017510000000255513241117762021106 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from osprofiler import notifier from osprofiler import web def init_from_conf(conf, context, project, service, host): """Initialize notifier from service configuration :param conf: service configuration :param context: request context :param project: project name (keystone, cinder etc.) :param service: service name that will be profiled :param host: hostname or host IP address that the service will be running on. """ connection_str = conf.profiler.connection_string _notifier = notifier.create( connection_str, context=context, project=project, service=service, host=host, conf=conf) notifier.set(_notifier) web.enable(conf.profiler.hmac_keys) osprofiler-1.15.2/osprofiler/notifier.py0000666000175100017510000000433513241117762020400 0ustar zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from osprofiler.drivers import base def _noop_notifier(info, context=None): """Do nothing on notify().""" # NOTE(boris-42): By default we are using noop notifier. __notifier = _noop_notifier __driver_cache = {} def notify(info): """Passes the profiling info to the notifier callable. :param info: dictionary with profiling information """ __notifier(info) def get(): """Returns notifier callable.""" return __notifier def set(notifier): """Service that are going to use profiler should set callable notifier. Callable notifier is instance of callable object, that accept exactly one argument "info". "info" - is dictionary of values that contains profiling information. 
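    A minimal example (the callable below is hypothetical)::

        from osprofiler import notifier

        def printing_notifier(info):
            print(info["name"], info.get("trace_id"))

        notifier.set(printing_notifier)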
""" global __notifier __notifier = notifier def create(connection_string, *args, **kwargs): """Create notifier based on specified plugin_name :param connection_string: connection string which specifies the storage driver for notifier :param *args: args that will be passed to the driver's __init__ method :param **kwargs: kwargs that will be passed to the driver's __init__ method :returns: Callable notifier method :raises TypeError: In case of invalid name of plugin raises TypeError """ global __driver_cache if connection_string not in __driver_cache: __driver_cache[connection_string] = base.get_driver(connection_string, *args, **kwargs).notify return __driver_cache[connection_string] osprofiler-1.15.2/osprofiler/opts.py0000666000175100017510000001567613241117762017560 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from osprofiler import web __all__ = [ "list_opts", "set_defaults", ] _profiler_opt_group = cfg.OptGroup( "profiler", title="OpenStack cross-service profiling", help=""" OSprofiler library allows to trace requests going through various OpenStack services and create the accumulated report of what time was spent on each request processing step.""") _enabled_opt = cfg.BoolOpt( "enabled", default=False, deprecated_name="profiler_enabled", help=""" Enables the profiling for all services on this node. Default value is False (fully disable the profiling feature). Possible values: * True: Enables the feature * False: Disables the feature. The profiling cannot be started via this project operations. If the profiling is triggered by another project, this project part will be empty. """) _trace_sqlalchemy_opt = cfg.BoolOpt( "trace_sqlalchemy", default=False, help=""" Enables SQL requests profiling in services. Default value is False (SQL requests won't be traced). Possible values: * True: Enables SQL requests profiling. Each SQL query will be part of the trace and can the be analyzed by how much time was spent for that. * False: Disables SQL requests profiling. The spent time is only shown on a higher level of operations. Single SQL queries cannot be analyzed this way. """) _hmac_keys_opt = cfg.StrOpt( "hmac_keys", default="SECRET_KEY", help=""" Secret key(s) to use for encrypting context data for performance profiling. This string value should have the following format: [,,...], where each key is some random string. A user who triggers the profiling via the REST API has to set one of these keys in the headers of the REST API call to include profiling results of this node for this particular project. Both "enabled" flag and "hmac_keys" config options should be set to enable profiling. Also, to generate correct profiling information across all services at least one key needs to be consistent between OpenStack projects. 
This ensures it can be used from client side to generate the trace, containing information from all possible resources.""") _connection_string_opt = cfg.StrOpt( "connection_string", default="messaging://", help=""" Connection string for a notifier backend. Default value is messaging:// which sets the notifier to oslo_messaging. Examples of possible values: * messaging://: use oslo_messaging driver for sending notifications. * mongodb://127.0.0.1:27017 : use mongodb driver for sending notifications. * elasticsearch://127.0.0.1:9200 : use elasticsearch driver for sending notifications. """) _es_doc_type_opt = cfg.StrOpt( "es_doc_type", default="notification", help=""" Document type for notification indexing in elasticsearch. """) _es_scroll_time_opt = cfg.StrOpt( "es_scroll_time", default="2m", help=""" This parameter is a time value parameter (for example: es_scroll_time=2m), indicating for how long the nodes that participate in the search will maintain relevant resources in order to continue and support it. """) _es_scroll_size_opt = cfg.IntOpt( "es_scroll_size", default=10000, help=""" Elasticsearch splits large requests in batches. This parameter defines maximum size of each batch (for example: es_scroll_size=10000). """) _socket_timeout_opt = cfg.FloatOpt( "socket_timeout", default=0.1, help=""" Redissentinel provides a timeout option on the connections. This parameter defines that timeout (for example: socket_timeout=0.1). """) _sentinel_service_name_opt = cfg.StrOpt( "sentinel_service_name", default="mymaster", help=""" Redissentinel uses a service name to identify a master redis service. This parameter defines the name (for example: sentinal_service_name=mymaster). """) _PROFILER_OPTS = [ _enabled_opt, _trace_sqlalchemy_opt, _hmac_keys_opt, _connection_string_opt, _es_doc_type_opt, _es_scroll_time_opt, _es_scroll_size_opt, _socket_timeout_opt, _sentinel_service_name_opt ] cfg.CONF.register_opts(_PROFILER_OPTS, group=_profiler_opt_group) def set_defaults(conf, enabled=None, trace_sqlalchemy=None, hmac_keys=None, connection_string=None, es_doc_type=None, es_scroll_time=None, es_scroll_size=None, socket_timeout=None, sentinel_service_name=None): conf.register_opts(_PROFILER_OPTS, group=_profiler_opt_group) if enabled is not None: conf.set_default("enabled", enabled, group=_profiler_opt_group.name) if trace_sqlalchemy is not None: conf.set_default("trace_sqlalchemy", trace_sqlalchemy, group=_profiler_opt_group.name) if hmac_keys is not None: conf.set_default("hmac_keys", hmac_keys, group=_profiler_opt_group.name) if connection_string is not None: conf.set_default("connection_string", connection_string, group=_profiler_opt_group.name) if es_doc_type is not None: conf.set_default("es_doc_type", es_doc_type, group=_profiler_opt_group.name) if es_scroll_time is not None: conf.set_default("es_scroll_time", es_scroll_time, group=_profiler_opt_group.name) if es_scroll_size is not None: conf.set_default("es_scroll_size", es_scroll_size, group=_profiler_opt_group.name) if socket_timeout is not None: conf.set_default("socket_timeout", socket_timeout, group=_profiler_opt_group.name) if sentinel_service_name is not None: conf.set_default("sentinel_service_name", sentinel_service_name, group=_profiler_opt_group.name) def is_trace_enabled(conf=None): if conf is None: conf = cfg.CONF return conf.profiler.enabled def is_db_trace_enabled(conf=None): if conf is None: conf = cfg.CONF return conf.profiler.enabled and conf.profiler.trace_sqlalchemy def enable_web_trace(conf=None): if conf is None: conf = 
cfg.CONF if conf.profiler.enabled: web.enable(conf.profiler.hmac_keys) def disable_web_trace(conf=None): if conf is None: conf = cfg.CONF if conf.profiler.enabled: web.disable() def list_opts(): return [(_profiler_opt_group.name, _PROFILER_OPTS)] osprofiler-1.15.2/osprofiler/exc.py0000666000175100017510000000165513241117762017342 0ustar zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. class CommandError(Exception): """Invalid usage of CLI.""" def __init__(self, message=None): self.message = message def __str__(self): return self.message or self.__class__.__doc__ class LogInsightAPIError(Exception): pass class LogInsightLoginTimeout(Exception): pass osprofiler-1.15.2/osprofiler/hacking/0000775000175100017510000000000013241120161017567 5ustar zuulzuul00000000000000osprofiler-1.15.2/osprofiler/hacking/__init__.py0000666000175100017510000000000013241117762021705 0ustar zuulzuul00000000000000osprofiler-1.15.2/osprofiler/hacking/checks.py0000666000175100017510000003173513241117762021431 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Guidelines for writing new hacking checks - Use only for OSProfiler specific tests. OpenStack general tests should be submitted to the common 'hacking' module. - Pick numbers in the range N3xx. Find the current test with the highest allocated number and then pick the next value. - Keep the test method code in the source file ordered based on the N3xx value. - List the new rule in the top level HACKING.rst file - Add test cases for each new rule to tests/unit/test_hacking.py """ import functools import re import tokenize re_assert_true_instance = re.compile( r"(.)*assertTrue\(isinstance\((\w|\.|\'|\"|\[|\])+, " r"(\w|\.|\'|\"|\[|\])+\)\)") re_assert_equal_type = re.compile( r"(.)*assertEqual\(type\((\w|\.|\'|\"|\[|\])+\), " r"(\w|\.|\'|\"|\[|\])+\)") re_assert_equal_end_with_none = re.compile(r"assertEqual\(.*?,\s+None\)$") re_assert_equal_start_with_none = re.compile(r"assertEqual\(None,") re_assert_true_false_with_in_or_not_in = re.compile( r"assert(True|False)\(" r"(\w|[][.'\"])+( not)? in (\w|[][.'\",])+(, .*)?\)") re_assert_true_false_with_in_or_not_in_spaces = re.compile( r"assert(True|False)\((\w|[][.'\"])+( not)? in [\[|'|\"](\w|[][.'\", ])+" r"[\[|'|\"](, .*)?\)") re_assert_equal_in_end_with_true_or_false = re.compile( r"assertEqual\((\w|[][.'\"])+( not)? 
in (\w|[][.'\", ])+, (True|False)\)") re_assert_equal_in_start_with_true_or_false = re.compile( r"assertEqual\((True|False), (\w|[][.'\"])+( not)? in (\w|[][.'\", ])+\)") re_no_construct_dict = re.compile( r"\sdict\(\)") re_no_construct_list = re.compile( r"\slist\(\)") re_str_format = re.compile(r""" % # start of specifier \(([^)]+)\) # mapping key, in group 1 [#0 +\-]? # optional conversion flag (?:-?\d*)? # optional minimum field width (?:\.\d*)? # optional precision [hLl]? # optional length modifier [A-z%] # conversion modifier """, re.X) re_raises = re.compile( r"\s:raise[^s] *.*$|\s:raises *:.*$|\s:raises *[^:]+$") def skip_ignored_lines(func): @functools.wraps(func) def wrapper(logical_line, filename): line = logical_line.strip() if not line or line.startswith("#") or line.endswith("# noqa"): return yield next(func(logical_line, filename)) return wrapper def _parse_assert_mock_str(line): point = line.find(".assert_") if point != -1: end_pos = line[point:].find("(") + point return point, line[point + 1: end_pos], line[: point] else: return None, None, None @skip_ignored_lines def check_assert_methods_from_mock(logical_line, filename): """Ensure that ``assert_*`` methods from ``mock`` library is used correctly N301 - base error number N302 - related to nonexistent "assert_called" N303 - related to nonexistent "assert_called_once" """ correct_names = ["assert_any_call", "assert_called_once_with", "assert_called_with", "assert_has_calls"] ignored_files = ["./tests/unit/test_hacking.py"] if filename.startswith("./tests") and filename not in ignored_files: pos, method_name, obj_name = _parse_assert_mock_str(logical_line) if pos: if method_name not in correct_names: error_number = "N301" msg = ("%(error_number)s:'%(method)s' is not present in `mock`" " library. %(custom_msg)s For more details, visit " "http://www.voidspace.org.uk/python/mock/ .") if method_name == "assert_called": error_number = "N302" custom_msg = ("Maybe, you should try to use " "'assertTrue(%s.called)' instead." % obj_name) elif method_name == "assert_called_once": # For more details, see a bug in Rally: # https://bugs.launchpad.net/rally/+bug/1305991 error_number = "N303" custom_msg = ("Maybe, you should try to use " "'assertEqual(1, %s.call_count)' " "or '%s.assert_called_once_with()'" " instead." % (obj_name, obj_name)) else: custom_msg = ("Correct 'assert_*' methods: '%s'." 
% "', '".join(correct_names)) yield (pos, msg % { "error_number": error_number, "method": method_name, "custom_msg": custom_msg}) @skip_ignored_lines def assert_true_instance(logical_line, filename): """Check for assertTrue(isinstance(a, b)) sentences N320 """ if re_assert_true_instance.match(logical_line): yield (0, "N320 assertTrue(isinstance(a, b)) sentences not allowed, " "you should use assertIsInstance(a, b) instead.") @skip_ignored_lines def assert_equal_type(logical_line, filename): """Check for assertEqual(type(A), B) sentences N321 """ if re_assert_equal_type.match(logical_line): yield (0, "N321 assertEqual(type(A), B) sentences not allowed, " "you should use assertIsInstance(a, b) instead.") @skip_ignored_lines def assert_equal_none(logical_line, filename): """Check for assertEqual(A, None) or assertEqual(None, A) sentences N322 """ res = (re_assert_equal_start_with_none.search(logical_line) or re_assert_equal_end_with_none.search(logical_line)) if res: yield (0, "N322 assertEqual(A, None) or assertEqual(None, A) " "sentences not allowed, you should use assertIsNone(A) " "instead.") @skip_ignored_lines def assert_true_or_false_with_in(logical_line, filename): """Check assertTrue/False(A in/not in B) with collection contents Check for assertTrue/False(A in B), assertTrue/False(A not in B), assertTrue/False(A in B, message) or assertTrue/False(A not in B, message) sentences. N323 """ res = (re_assert_true_false_with_in_or_not_in.search(logical_line) or re_assert_true_false_with_in_or_not_in_spaces.search(logical_line)) if res: yield (0, "N323 assertTrue/assertFalse(A in/not in B)sentences not " "allowed, you should use assertIn(A, B) or assertNotIn(A, B)" " instead.") @skip_ignored_lines def assert_equal_in(logical_line, filename): """Check assertEqual(A in/not in B, True/False) with collection contents Check for assertEqual(A in B, True/False), assertEqual(True/False, A in B), assertEqual(A not in B, True/False) or assertEqual(True/False, A not in B) sentences. 
N324 """ res = (re_assert_equal_in_end_with_true_or_false.search(logical_line) or re_assert_equal_in_start_with_true_or_false.search(logical_line)) if res: yield (0, "N324: Use assertIn/NotIn(A, B) rather than " "assertEqual(A in/not in B, True/False) when checking " "collection contents.") @skip_ignored_lines def check_quotes(logical_line, filename): """Check that single quotation marks are not used N350 """ in_string = False in_multiline_string = False single_quotas_are_used = False check_tripple = ( lambda line, i, char: ( i + 2 < len(line) and (char == line[i] == line[i + 1] == line[i + 2]) ) ) i = 0 while i < len(logical_line): char = logical_line[i] if in_string: if char == "\"": in_string = False if char == "\\": i += 1 # ignore next char elif in_multiline_string: if check_tripple(logical_line, i, "\""): i += 2 # skip next 2 chars in_multiline_string = False elif char == "#": break elif char == "'": single_quotas_are_used = True break elif char == "\"": if check_tripple(logical_line, i, "\""): in_multiline_string = True i += 3 continue in_string = True i += 1 if single_quotas_are_used: yield (i, "N350 Remove Single quotes") @skip_ignored_lines def check_no_constructor_data_struct(logical_line, filename): """Check that data structs (lists, dicts) are declared using literals N351 """ match = re_no_construct_dict.search(logical_line) if match: yield (0, "N351 Remove dict() construct and use literal {}") match = re_no_construct_list.search(logical_line) if match: yield (0, "N351 Remove list() construct and use literal []") def check_dict_formatting_in_string(logical_line, tokens): """Check that strings do not use dict-formatting with a single replacement N352 """ # NOTE(stpierre): Can't use @skip_ignored_lines here because it's # a stupid decorator that only works on functions that take # (logical_line, filename) as arguments. if (not logical_line or logical_line.startswith("#") or logical_line.endswith("# noqa")): return current_string = "" in_string = False for token_type, text, start, end, line in tokens: if token_type == tokenize.STRING: if not in_string: current_string = "" in_string = True current_string += text.strip("\"") elif token_type == tokenize.OP: if not current_string: continue # NOTE(stpierre): The string formatting operator % has # lower precedence than +, so we assume that the logical # string has concluded whenever we hit an operator of any # sort. (Most operators don't work for strings anyway.) # Some string operators do have higher precedence than %, # though, so you can technically trick this check by doing # things like: # # "%(foo)s" * 1 % {"foo": 1} # "%(foo)s"[:] % {"foo": 1} # # It also will produce false positives if you use explicit # parenthesized addition for two strings instead of # concatenation by juxtaposition, e.g.: # # ("%(foo)s" + "%(bar)s") % vals # # But if you do any of those things, then you deserve all # of the horrible things that happen to you, and probably # many more. in_string = False if text == "%": format_keys = set() for match in re_str_format.finditer(current_string): format_keys.add(match.group(1)) if len(format_keys) == 1: yield (0, "N353 Do not use mapping key string formatting " "with a single key") if text != ")": # NOTE(stpierre): You can have a parenthesized string # followed by %, so a closing paren doesn't obviate # the possibility for a substitution operator like # every other operator does. 
current_string = "" elif token_type in (tokenize.NL, tokenize.COMMENT): continue else: in_string = False if token_type == tokenize.NEWLINE: current_string = "" @skip_ignored_lines def check_using_unicode(logical_line, filename): """Check crosspython unicode usage N353 """ if re.search(r"\bunicode\(", logical_line): yield (0, "N353 'unicode' function is absent in python3. Please " "use 'six.text_type' instead.") def check_raises(physical_line, filename): """Check raises usage N354 """ ignored_files = ["./tests/unit/test_hacking.py", "./tests/hacking/checks.py"] if filename not in ignored_files: if re_raises.search(physical_line): return (0, "N354 ':Please use ':raises Exception: conditions' " "in docstrings.") def factory(register): register(check_assert_methods_from_mock) register(assert_true_instance) register(assert_equal_type) register(assert_equal_none) register(assert_true_or_false_with_in) register(assert_equal_in) register(check_quotes) register(check_no_constructor_data_struct) register(check_dict_formatting_in_string) register(check_using_unicode) register(check_raises) osprofiler-1.15.2/osprofiler/drivers/0000775000175100017510000000000013241120161017641 5ustar zuulzuul00000000000000osprofiler-1.15.2/osprofiler/drivers/ceilometer.py0000666000175100017510000000561213241120010022342 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from osprofiler.drivers import base from osprofiler import exc class Ceilometer(base.Driver): def __init__(self, connection_str, **kwargs): """Driver receiving profiled information from ceilometer.""" super(Ceilometer, self).__init__(connection_str) try: import ceilometerclient.client except ImportError: raise exc.CommandError( "To use this command, you should install " "'ceilometerclient' manually. Use command:\n " "'pip install python-ceilometerclient'.") try: self.client = ceilometerclient.client.get_client( kwargs["ceilometer_api_version"], **kwargs) except Exception as e: if hasattr(e, "http_status") and e.http_status == 401: msg = "Invalid OpenStack Identity credentials." else: msg = "Error occurred while connecting to Ceilometer: %s." % e raise exc.CommandError(msg) @classmethod def get_name(cls): return "ceilometer" def get_report(self, base_id): """Retrieves and parses notification from ceilometer. :param base_id: Base id of trace elements. """ _filter = [{"field": "base_id", "op": "eq", "value": base_id}] # limit is hardcoded in this code state. 
Later that will be changed via # connection string usage notifications = [n.to_dict() for n in self.client.events.list(_filter, limit=100000)] for n in notifications: traits = n["traits"] def find_field(f_name): return [t["value"] for t in traits if t["name"] == f_name][0] trace_id = find_field("trace_id") parent_id = find_field("parent_id") name = find_field("name") project = find_field("project") service = find_field("service") host = find_field("host") timestamp = find_field("timestamp") payload = n.get("raw", {}).get("payload", {}) self._append_results(trace_id, parent_id, name, project, service, host, timestamp, payload) return self._parse_results() osprofiler-1.15.2/osprofiler/drivers/elasticsearch_driver.py0000666000175100017510000001267513241120010024406 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import six.moves.urllib.parse as parser from oslo_config import cfg from osprofiler.drivers import base from osprofiler import exc class ElasticsearchDriver(base.Driver): def __init__(self, connection_str, index_name="osprofiler-notifications", project=None, service=None, host=None, conf=cfg.CONF, **kwargs): """Elasticsearch driver for OSProfiler.""" super(ElasticsearchDriver, self).__init__(connection_str, project=project, service=service, host=host) try: from elasticsearch import Elasticsearch except ImportError: raise exc.CommandError( "To use this command, you should install " "'elasticsearch' manually. Use command:\n " "'pip install elasticsearch'.") client_url = parser.urlunparse(parser.urlparse(self.connection_str) ._replace(scheme="http")) self.conf = conf self.client = Elasticsearch(client_url) self.index_name = index_name @classmethod def get_name(cls): return "elasticsearch" def notify(self, info): """Send notifications to Elasticsearch. :param info: Contains information about trace element. In payload dict there are always 3 ids: "base_id" - uuid that is common for all notifications related to one trace. Used to simplify retrieving of all trace elements from Elasticsearch. "parent_id" - uuid of parent element in trace "trace_id" - uuid of current element in trace With parent_id and trace_id it's quite simple to build tree of trace elements, which simplify analyze of trace. """ info = info.copy() info["project"] = self.project info["service"] = self.service self.client.index(index=self.index_name, doc_type=self.conf.profiler.es_doc_type, body=info) def _hits(self, response): """Returns all hits of search query using scrolling :param response: ElasticSearch query response """ scroll_id = response["_scroll_id"] scroll_size = len(response["hits"]["hits"]) result = [] while scroll_size > 0: for hit in response["hits"]["hits"]: result.append(hit["_source"]) response = self.client.scroll(scroll_id=scroll_id, scroll=self.conf.profiler. 
es_scroll_time) scroll_id = response["_scroll_id"] scroll_size = len(response["hits"]["hits"]) return result def list_traces(self, query={"match_all": {}}, fields=[]): """Returns array of all base_id fields that match the given criteria :param query: dict that specifies the query criteria :param fields: iterable of strings that specifies the output fields """ for base_field in ["base_id", "timestamp"]: if base_field not in fields: fields.append(base_field) response = self.client.search(index=self.index_name, doc_type=self.conf.profiler.es_doc_type, size=self.conf.profiler.es_scroll_size, scroll=self.conf.profiler.es_scroll_time, body={"_source": fields, "query": query, "sort": [{"timestamp": "asc"}]}) return self._hits(response) def get_report(self, base_id): """Retrieves and parses notification from Elasticsearch. :param base_id: Base id of trace elements. """ response = self.client.search(index=self.index_name, doc_type=self.conf.profiler.es_doc_type, size=self.conf.profiler.es_scroll_size, scroll=self.conf.profiler.es_scroll_time, body={"query": { "match": {"base_id": base_id}}}) for n in self._hits(response): trace_id = n["trace_id"] parent_id = n["parent_id"] name = n["name"] project = n["project"] service = n["service"] host = n["info"]["host"] timestamp = n["timestamp"] self._append_results(trace_id, parent_id, name, project, service, host, timestamp, n) return self._parse_results() osprofiler-1.15.2/osprofiler/drivers/redis_driver.py0000666000175100017510000001235313241120010022673 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis Inc. # Copyright 2016 IBM Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from oslo_serialization import jsonutils import six.moves.urllib.parse as parser from osprofiler.drivers import base from osprofiler import exc class Redis(base.Driver): def __init__(self, connection_str, db=0, project=None, service=None, host=None, **kwargs): """Redis driver for OSProfiler.""" super(Redis, self).__init__(connection_str, project=project, service=service, host=host) try: from redis import StrictRedis except ImportError: raise exc.CommandError( "To use this command, you should install " "'redis' manually. Use command:\n " "'pip install redis'.") parsed_url = parser.urlparse(self.connection_str) self.db = StrictRedis(host=parsed_url.hostname, port=parsed_url.port, db=db) self.namespace = "osprofiler:" @classmethod def get_name(cls): return "redis" def notify(self, info): """Send notifications to Redis. :param info: Contains information about trace element. In payload dict there are always 3 ids: "base_id" - uuid that is common for all notifications related to one trace. Used to simplify retrieving of all trace elements from Redis. "parent_id" - uuid of parent element in trace "trace_id" - uuid of current element in trace With parent_id and trace_id it's quite simple to build tree of trace elements, which simplify analyze of trace. 
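    Roughly, each notification is stored as a single Redis key/value pair
    whose key (built just below from ``self.namespace``) looks like::

        osprofiler:<base_id>_<trace_id>_<timestamp>

    and whose value is the JSON-encoded payload.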
""" data = info.copy() data["project"] = self.project data["service"] = self.service key = self.namespace + data["base_id"] + "_" + data["trace_id"] + "_" + \ data["timestamp"] self.db.set(key, jsonutils.dumps(data)) def list_traces(self, query="*", fields=[]): """Returns array of all base_id fields that match the given criteria :param query: string that specifies the query criteria :param fields: iterable of strings that specifies the output fields """ for base_field in ["base_id", "timestamp"]: if base_field not in fields: fields.append(base_field) ids = self.db.scan_iter(match=self.namespace + query) traces = [jsonutils.loads(self.db.get(i)) for i in ids] result = [] for trace in traces: result.append({key: value for key, value in trace.iteritems() if key in fields}) return result def get_report(self, base_id): """Retrieves and parses notification from Redis. :param base_id: Base id of trace elements. """ for key in self.db.scan_iter(match=self.namespace + base_id + "*"): data = self.db.get(key) n = jsonutils.loads(data) trace_id = n["trace_id"] parent_id = n["parent_id"] name = n["name"] project = n["project"] service = n["service"] host = n["info"]["host"] timestamp = n["timestamp"] self._append_results(trace_id, parent_id, name, project, service, host, timestamp, n) return self._parse_results() class RedisSentinel(Redis, base.Driver): def __init__(self, connection_str, db=0, project=None, service=None, host=None, conf=cfg.CONF, **kwargs): """Redis driver for OSProfiler.""" super(RedisSentinel, self).__init__(connection_str, project=project, service=service, host=host) try: from redis.sentinel import Sentinel except ImportError: raise exc.CommandError( "To use this command, you should install " "'redis' manually. Use command:\n " "'pip install redis'.") self.conf = conf socket_timeout = self.conf.profiler.socket_timeout parsed_url = parser.urlparse(self.connection_str) sentinel = Sentinel([(parsed_url.hostname, int(parsed_url.port))], socket_timeout=socket_timeout) self.db = sentinel.master_for(self.conf.profiler.sentinel_service_name, socket_timeout=socket_timeout) @classmethod def get_name(cls): return "redissentinel" osprofiler-1.15.2/osprofiler/drivers/messaging.py0000666000175100017510000001661013241120010022167 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import functools import signal import time from oslo_utils import importutils from osprofiler.drivers import base class Messaging(base.Driver): def __init__(self, connection_str, project=None, service=None, host=None, context=None, conf=None, transport_url=None, idle_timeout=1, **kwargs): """Driver that uses messaging as transport for notifications :param connection_str: OSProfiler driver connection string, equals to messaging:// :param project: project name that will be included into notification :param service: service name that will be included into notification :param host: host name that will be included into notification :param context: oslo.messaging context :param conf: oslo.config CONF object :param transport_url: oslo.messaging transport, e.g. rabbit://rabbit:password@devstack:5672/ :param idle_timeout: how long to wait for new notifications after the last one seen in the trace; this parameter is useful to collect full trace of asynchronous commands, e.g. when user runs `osprofiler` right after `openstack server create` :param kwargs: black hole for any other parameters """ self.oslo_messaging = importutils.try_import("oslo_messaging") if not self.oslo_messaging: raise ValueError("Oslo.messaging library is required for " "messaging driver") super(Messaging, self).__init__(connection_str, project=project, service=service, host=host) self.context = context if not conf: oslo_config = importutils.try_import("oslo_config") if not oslo_config: raise ValueError("Oslo.config library is required for " "messaging driver") conf = oslo_config.cfg.CONF transport_kwargs = {} if transport_url: transport_kwargs["url"] = transport_url self.transport = self.oslo_messaging.get_notification_transport( conf, **transport_kwargs) self.client = self.oslo_messaging.Notifier( self.transport, publisher_id=self.host, driver="messaging", topics=["profiler"], retry=0) self.idle_timeout = idle_timeout @classmethod def get_name(cls): return "messaging" def notify(self, info, context=None): """Send notifications to backend via oslo.messaging notifier API. :param info: Contains information about trace element. In payload dict there are always 3 ids: "base_id" - uuid that is common for all notifications related to one trace. Used to simplify retrieving of all trace elements from Ceilometer. "parent_id" - uuid of parent element in trace "trace_id" - uuid of current element in trace With parent_id and trace_id it's quite simple to build tree of trace elements, which simplify analyze of trace. :param context: request context that is mostly used to specify current active user and tenant. """ info["project"] = self.project info["service"] = self.service self.client.info(context or self.context, "profiler.%s" % info["service"], info) def get_report(self, base_id): notification_endpoint = NotifyEndpoint(self.oslo_messaging, base_id) endpoints = [notification_endpoint] targets = [self.oslo_messaging.Target(topic="profiler")] server = self.oslo_messaging.notify.get_notification_listener( self.transport, targets, endpoints, executor="threading") state = dict(running=False) sfn = functools.partial(signal_handler, state=state) # modify signal handlers to handle interruption gracefully old_sigterm_handler = signal.signal(signal.SIGTERM, sfn) old_sigint_handler = signal.signal(signal.SIGINT, sfn) try: server.start() except self.oslo_messaging.server.ServerListenError: # failed to start the server raise except SignalExit: print("Execution interrupted while trying to connect to " "messaging server. 
No data was collected.") return {} # connected to server, now read the data try: # run until the trace is complete state["running"] = True while state["running"]: last_read_time = notification_endpoint.get_last_read_time() wait = self.idle_timeout - (time.time() - last_read_time) if wait < 0: state["running"] = False else: time.sleep(wait) except SignalExit: print("Execution interrupted. Terminating") finally: server.stop() server.wait() # restore original signal handlers signal.signal(signal.SIGTERM, old_sigterm_handler) signal.signal(signal.SIGINT, old_sigint_handler) events = notification_endpoint.get_messages() if not events: print("No events are collected for Trace UUID %s. Please note " "that osprofiler has read ALL events from profiler topic, " "but has not found any for specified Trace UUID." % base_id) for n in events: trace_id = n["trace_id"] parent_id = n["parent_id"] name = n["name"] project = n["project"] service = n["service"] host = n["info"]["host"] timestamp = n["timestamp"] self._append_results(trace_id, parent_id, name, project, service, host, timestamp, n) return self._parse_results() class NotifyEndpoint(object): def __init__(self, oslo_messaging, base_id): self.received_messages = [] self.last_read_time = time.time() self.filter_rule = oslo_messaging.NotificationFilter( payload={"base_id": base_id}) def info(self, ctxt, publisher_id, event_type, payload, metadata): self.received_messages.append(payload) self.last_read_time = time.time() def get_messages(self): return self.received_messages def get_last_read_time(self): return self.last_read_time # time when the latest event was received class SignalExit(BaseException): pass def signal_handler(signum, frame, state): state["running"] = False raise SignalExit() osprofiler-1.15.2/osprofiler/drivers/__init__.py0000666000175100017510000000054013241120010021744 0ustar zuulzuul00000000000000from osprofiler.drivers import base # noqa from osprofiler.drivers import ceilometer # noqa from osprofiler.drivers import elasticsearch_driver # noqa from osprofiler.drivers import loginsight # noqa from osprofiler.drivers import messaging # noqa from osprofiler.drivers import mongodb # noqa from osprofiler.drivers import redis_driver # noqa osprofiler-1.15.2/osprofiler/drivers/mongodb.py0000666000175100017510000000703213241120010021635 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from osprofiler.drivers import base from osprofiler import exc class MongoDB(base.Driver): def __init__(self, connection_str, db_name="osprofiler", project=None, service=None, host=None, **kwargs): """MongoDB driver for OSProfiler.""" super(MongoDB, self).__init__(connection_str, project=project, service=service, host=host) try: from pymongo import MongoClient except ImportError: raise exc.CommandError( "To use this command, you should install " "'pymongo' manually. 
Use command:\n " "'pip install pymongo'.") client = MongoClient(self.connection_str, connect=False) self.db = client[db_name] @classmethod def get_name(cls): return "mongodb" def notify(self, info): """Send notifications to MongoDB. :param info: Contains information about trace element. In payload dict there are always 3 ids: "base_id" - uuid that is common for all notifications related to one trace. Used to simplify retrieving of all trace elements from MongoDB. "parent_id" - uuid of parent element in trace "trace_id" - uuid of current element in trace With parent_id and trace_id it's quite simple to build tree of trace elements, which simplify analyze of trace. """ data = info.copy() data["project"] = self.project data["service"] = self.service self.db.profiler.insert_one(data) def list_traces(self, query, fields=[]): """Returns array of all base_id fields that match the given criteria :param query: dict that specifies the query criteria :param fields: iterable of strings that specifies the output fields """ ids = self.db.profiler.find(query).distinct("base_id") out_format = {"base_id": 1, "timestamp": 1, "_id": 0} out_format.update({i: 1 for i in fields}) return [self.db.profiler.find( {"base_id": i}, out_format).sort("timestamp")[0] for i in ids] def get_report(self, base_id): """Retrieves and parses notification from MongoDB. :param base_id: Base id of trace elements. """ for n in self.db.profiler.find({"base_id": base_id}, {"_id": 0}): trace_id = n["trace_id"] parent_id = n["parent_id"] name = n["name"] project = n["project"] service = n["service"] host = n["info"]["host"] timestamp = n["timestamp"] self._append_results(trace_id, parent_id, name, project, service, host, timestamp, n) return self._parse_results() osprofiler-1.15.2/osprofiler/drivers/base.py0000666000175100017510000002301513241120010021121 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import logging import six.moves.urllib.parse as urlparse from osprofiler import _utils LOG = logging.getLogger(__name__) def get_driver(connection_string, *args, **kwargs): """Create driver's instance according to specified connection string""" # NOTE(ayelistratov) Backward compatibility with old Messaging notation # Remove after patching all OS services # NOTE(ishakhat) Raise exception when ParsedResult.scheme is empty if "://" not in connection_string: connection_string += "://" parsed_connection = urlparse.urlparse(connection_string) LOG.debug("String %s looks like a connection string, trying it.", connection_string) backend = parsed_connection.scheme for driver in _utils.itersubclasses(Driver): if backend == driver.get_name(): return driver(connection_string, *args, **kwargs) raise ValueError("Driver not found for connection string: " "%s" % connection_string) class Driver(object): """Base Driver class. This class provides protected common methods that do not rely on a specific storage backend. 
Public methods notify() and/or get_report(), which require using storage backend API, must be overridden and implemented by any class derived from this class. """ def __init__(self, connection_str, project=None, service=None, host=None): self.connection_str = connection_str self.project = project self.service = service self.host = host self.result = {} self.started_at = None self.finished_at = None # Last trace started time self.last_started_at = None def notify(self, info, **kwargs): """This method will be called on each notifier.notify() call. To add new drivers you should, create new subclass of this class and implement notify method. :param info: Contains information about trace element. In payload dict there are always 3 ids: "base_id" - uuid that is common for all notifications related to one trace. Used to simplify retrieving of all trace elements from the backend. "parent_id" - uuid of parent element in trace "trace_id" - uuid of current element in trace With parent_id and trace_id it's quite simple to build tree of trace elements, which simplify analyze of trace. """ raise NotImplementedError("{0}: This method is either not supported " "or has to be overridden".format( self.get_name())) def get_report(self, base_id): """Forms and returns report composed from the stored notifications. :param base_id: Base id of trace elements. """ raise NotImplementedError("{0}: This method is either not supported " "or has to be overridden".format( self.get_name())) @classmethod def get_name(cls): """Returns backend specific name for the driver.""" return cls.__name__ def list_traces(self, query, fields): """Returns array of all base_id fields that match the given criteria :param query: dict that specifies the query criteria :param fields: iterable of strings that specifies the output fields """ raise NotImplementedError("{0}: This method is either not supported " "or has to be overridden".format( self.get_name())) @staticmethod def _build_tree(nodes): """Builds the tree (forest) data structure based on the list of nodes. Tree building works in O(n*log(n)). :param nodes: dict of nodes, where each node is a dictionary with fields "parent_id", "trace_id", "info" :returns: list of top level ("root") nodes in form of dictionaries, each containing the "info" and "children" fields, where "children" is the list of child nodes ("children" will be empty for leafs) """ tree = [] for trace_id in nodes: node = nodes[trace_id] node.setdefault("children", []) parent_id = node["parent_id"] if parent_id in nodes: nodes[parent_id].setdefault("children", []) nodes[parent_id]["children"].append(node) else: tree.append(node) # no parent => top-level node for trace_id in nodes: nodes[trace_id]["children"].sort( key=lambda x: x["info"]["started"]) return sorted(tree, key=lambda x: x["info"]["started"]) def _append_results(self, trace_id, parent_id, name, project, service, host, timestamp, raw_payload=None): """Appends the notification to the dictionary of notifications. :param trace_id: UUID of current trace point :param parent_id: UUID of parent trace point :param name: name of operation :param project: project name :param service: service name :param host: host name or FQDN :param timestamp: Unicode-style timestamp matching the pattern "%Y-%m-%dT%H:%M:%S.%f" , e.g. 
2016-04-18T17:42:10.77 :param raw_payload: raw notification without any filtering, with all fields included """ timestamp = datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%f") if trace_id not in self.result: self.result[trace_id] = { "info": { "name": name.split("-")[0], "project": project, "service": service, "host": host, }, "trace_id": trace_id, "parent_id": parent_id, } self.result[trace_id]["info"]["meta.raw_payload.%s" % name] = raw_payload if name.endswith("stop"): self.result[trace_id]["info"]["finished"] = timestamp self.result[trace_id]["info"]["exception"] = "None" if raw_payload and "info" in raw_payload: exc = raw_payload["info"].get("etype", "None") self.result[trace_id]["info"]["exception"] = exc else: self.result[trace_id]["info"]["started"] = timestamp if not self.last_started_at or self.last_started_at < timestamp: self.last_started_at = timestamp if not self.started_at or self.started_at > timestamp: self.started_at = timestamp if not self.finished_at or self.finished_at < timestamp: self.finished_at = timestamp def _parse_results(self): """Parses Driver's notifications placed by _append_results() . :returns: full profiling report """ def msec(dt): # NOTE(boris-42): Unfortunately this is the simplest way that works # in py26 and py27 microsec = (dt.microseconds + (dt.seconds + dt.days * 24 * 3600) * 1e6) return int(microsec / 1000.0) stats = {} for r in self.result.values(): # NOTE(boris-42): We are not able to guarantee that the backend # consumed all messages => so we should at make duration 0ms. if "started" not in r["info"]: r["info"]["started"] = r["info"]["finished"] if "finished" not in r["info"]: r["info"]["finished"] = r["info"]["started"] op_type = r["info"]["name"] op_started = msec(r["info"]["started"] - self.started_at) op_finished = msec(r["info"]["finished"] - self.started_at) duration = op_finished - op_started r["info"]["started"] = op_started r["info"]["finished"] = op_finished if op_type not in stats: stats[op_type] = { "count": 1, "duration": duration } else: stats[op_type]["count"] += 1 stats[op_type]["duration"] += duration return { "info": { "name": "total", "started": 0, "finished": msec(self.finished_at - self.started_at) if self.started_at else None, "last_trace_started": msec( self.last_started_at - self.started_at ) if self.started_at else None }, "children": self._build_tree(self.result), "stats": stats } osprofiler-1.15.2/osprofiler/drivers/loginsight.py0000666000175100017510000002276413241117762022414 0ustar zuulzuul00000000000000# Copyright (c) 2016 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Classes to use VMware vRealize Log Insight as the trace data store. 
""" import json import logging as log import netaddr from oslo_concurrency.lockutils import synchronized import requests import six.moves.urllib.parse as urlparse from osprofiler.drivers import base from osprofiler import exc LOG = log.getLogger(__name__) class LogInsightDriver(base.Driver): """Driver for storing trace data in VMware vRealize Log Insight. The driver uses Log Insight ingest service to store trace data and uses the query service to retrieve it. The minimum required Log Insight version is 3.3. The connection string to initialize the driver should be of the format: loginsight://:@ If the username or password contains the character ':' or '@', it must be escaped using URL encoding. For example, the connection string to connect to Log Insight server at 10.1.2.3 using username "osprofiler" and password "p@ssword" is: loginsight://osprofiler:p%40ssword@10.1.2.3 """ def __init__( self, connection_str, project=None, service=None, host=None, **kwargs): super(LogInsightDriver, self).__init__(connection_str, project=project, service=service, host=host) parsed_connection = urlparse.urlparse(connection_str) try: creds, host = parsed_connection.netloc.split("@") username, password = creds.split(":") except ValueError: raise ValueError("Connection string format is: loginsight://" ":@. If the " "username or password contains the character '@' " "or ':', it must be escaped using URL encoding.") username = urlparse.unquote(username) password = urlparse.unquote(password) self._client = LogInsightClient(host, username, password) self._client.login() @classmethod def get_name(cls): return "loginsight" def notify(self, info): """Send trace to Log Insight server.""" trace = info.copy() trace["project"] = self.project trace["service"] = self.service event = {"text": "OSProfiler trace"} def _create_field(name, content): return {"name": name, "content": content} event["fields"] = [_create_field("base_id", trace["base_id"]), _create_field("trace_id", trace["trace_id"]), _create_field("project", trace["project"]), _create_field("service", trace["service"]), _create_field("name", trace["name"]), _create_field("trace", json.dumps(trace))] self._client.send_event(event) def get_report(self, base_id): """Retrieves and parses trace data from Log Insight. 
:param base_id: Trace base ID """ response = self._client.query_events({"base_id": base_id}) if "events" in response: for event in response["events"]: if "fields" not in event: continue for field in event["fields"]: if field["name"] == "trace": trace = json.loads(field["content"]) trace_id = trace["trace_id"] parent_id = trace["parent_id"] name = trace["name"] project = trace["project"] service = trace["service"] host = trace["info"]["host"] timestamp = trace["timestamp"] self._append_results( trace_id, parent_id, name, project, service, host, timestamp, trace) break return self._parse_results() class LogInsightClient(object): """A minimal Log Insight client.""" LI_OSPROFILER_AGENT_ID = "F52D775B-6017-4787-8C8A-F21AE0AEC057" # API paths SESSIONS_PATH = "api/v1/sessions" CURRENT_SESSIONS_PATH = "api/v1/sessions/current" EVENTS_INGEST_PATH = "api/v1/events/ingest/%s" % LI_OSPROFILER_AGENT_ID QUERY_EVENTS_BASE_PATH = "api/v1/events" def __init__(self, host, username, password, api_port=9000, api_ssl_port=9543, query_timeout=60000): self._host = host self._username = username self._password = password self._api_port = api_port self._api_ssl_port = api_ssl_port self._query_timeout = query_timeout self._session = requests.Session() self._session_id = None def _build_base_url(self, scheme): proto_str = "%s://" % scheme host_str = ("[%s]" % self._host if netaddr.valid_ipv6(self._host) else self._host) port_str = ":%d" % (self._api_ssl_port if scheme == "https" else self._api_port) return proto_str + host_str + port_str def _check_response(self, resp): if resp.status_code == 440: raise exc.LogInsightLoginTimeout() if not resp.ok: msg = "n/a" if resp.text: try: body = json.loads(resp.text) msg = body.get("errorMessage", msg) except ValueError: pass else: msg = resp.reason raise exc.LogInsightAPIError(msg) def _send_request( self, method, scheme, path, headers=None, body=None, params=None): url = "%s/%s" % (self._build_base_url(scheme), path) headers = headers or {} headers["content-type"] = "application/json" body = body or {} params = params or {} req = requests.Request( method, url, headers=headers, data=json.dumps(body), params=params) req = req.prepare() resp = self._session.send(req, verify=False) self._check_response(resp) return resp.json() def _get_auth_header(self): return {"X-LI-Session-Id": self._session_id} def _trunc_session_id(self): if self._session_id: return self._session_id[-5:] def _is_current_session_active(self): try: self._send_request("get", "https", self.CURRENT_SESSIONS_PATH, headers=self._get_auth_header()) LOG.debug("Current session %s is active.", self._trunc_session_id()) return True except (exc.LogInsightLoginTimeout, exc.LogInsightAPIError): LOG.debug("Current session %s is not active.", self._trunc_session_id()) return False @synchronized("li_login_lock") def login(self): # Another thread might have created the session while the current # thread was waiting for the lock. 
if self._session_id and self._is_current_session_active(): return LOG.info("Logging into Log Insight server: %s.", self._host) resp = self._send_request("post", "https", self.SESSIONS_PATH, body={"username": self._username, "password": self._password}) self._session_id = resp["sessionId"] LOG.debug("Established session %s.", self._trunc_session_id()) def send_event(self, event): events = {"events": [event]} self._send_request("post", "http", self.EVENTS_INGEST_PATH, body=events) def query_events(self, params): # Assumes that the keys and values in the params are strings and # the operator is "CONTAINS". constraints = [] for field, value in params.items(): constraints.append("%s/CONTAINS+%s" % (field, value)) constraints.append("timestamp/GT+0") path = "%s/%s" % (self.QUERY_EVENTS_BASE_PATH, "/".join(constraints)) def _query_events(): return self._send_request("get", "https", path, headers=self._get_auth_header(), params={"limit": 20000, "timeout": self._query_timeout}) try: resp = _query_events() except exc.LogInsightLoginTimeout: # Login again and re-try. LOG.debug("Current session timed out.") self.login() resp = _query_events() return resp osprofiler-1.15.2/osprofiler/sqlalchemy.py0000666000175100017510000000653013241117762020722 0ustar zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import logging as log from oslo_utils import reflection from osprofiler import profiler LOG = log.getLogger(__name__) _DISABLED = False def disable(): """Disable tracing of all DB queries. Reduce a lot size of profiles.""" global _DISABLED _DISABLED = True def enable(): """add_tracing adds event listeners for sqlalchemy.""" global _DISABLED _DISABLED = False def add_tracing(sqlalchemy, engine, name, hide_result=True): """Add tracing to all sqlalchemy calls.""" if not _DISABLED: sqlalchemy.event.listen(engine, "before_cursor_execute", _before_cursor_execute(name)) sqlalchemy.event.listen( engine, "after_cursor_execute", _after_cursor_execute(hide_result=hide_result) ) sqlalchemy.event.listen(engine, "handle_error", handle_error) @contextlib.contextmanager def wrap_session(sqlalchemy, sess): with sess as s: if not getattr(s.bind, "traced", False): add_tracing(sqlalchemy, s.bind, "db") s.bind.traced = True yield s def _before_cursor_execute(name): """Add listener that will send trace info before query is executed.""" def handler(conn, cursor, statement, params, context, executemany): info = {"db": { "statement": statement, "params": params} } profiler.start(name, info=info) return handler def _after_cursor_execute(hide_result=True): """Add listener that will send trace info after query is executed. :param hide_result: Boolean value to hide or show SQL result in trace. True - hide SQL result (default). False - show SQL result in trace. 
""" def handler(conn, cursor, statement, params, context, executemany): if not hide_result: # Add SQL result to trace info in *-stop phase info = { "db": { "result": str(cursor._rows) } } profiler.stop(info=info) else: profiler.stop() return handler def handle_error(exception_context): """Handle SQLAlchemy errors""" exception_class_name = reflection.get_class_name( exception_context.original_exception) original_exception = str(exception_context.original_exception) chained_exception = str(exception_context.chained_exception) info = { "etype": exception_class_name, "db": { "original_exception": original_exception, "chained_exception": chained_exception } } profiler.stop(info=info) LOG.debug("OSProfiler has handled SQLAlchemy error: %s", original_exception) osprofiler-1.15.2/osprofiler/profiler.py0000666000175100017510000003635213241120010020363 0ustar zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import datetime import functools import inspect import socket import threading from oslo_utils import reflection from oslo_utils import uuidutils from osprofiler import notifier # NOTE(boris-42): Thread safe storage for profiler instances. __local_ctx = threading.local() def _clean(): __local_ctx.profiler = None def _ensure_no_multiple_traced(traceable_attrs): for attr_name, attr in traceable_attrs: traced_times = getattr(attr, "__traced__", 0) if traced_times: raise ValueError("Can not apply new trace on top of" " previously traced attribute '%s' since" " it has been traced %s times previously" % (attr_name, traced_times)) def init(hmac_key, base_id=None, parent_id=None): """Init profiler instance for current thread. You should call profiler.init() before using osprofiler. Otherwise profiler.start() and profiler.stop() methods won't do anything. :param hmac_key: secret key to sign trace information. :param base_id: Used to bind all related traces. :param parent_id: Used to build tree of traces. :returns: Profiler instance """ if get() is None: __local_ctx.profiler = _Profiler(hmac_key, base_id=base_id, parent_id=parent_id) return __local_ctx.profiler def get(): """Get profiler instance. :returns: Profiler instance or None if profiler wasn't inited. """ return getattr(__local_ctx, "profiler", None) def start(name, info=None): """Send new start notification if profiler instance is presented. :param name: The name of action. E.g. wsgi, rpc, db, etc.. :param info: Dictionary with extra trace information. For example in wsgi it can be url, in rpc - message or in db sql - request. """ profiler = get() if profiler: profiler.start(name, info=info) def stop(info=None): """Send new stop notification if profiler instance is presented.""" profiler = get() if profiler: profiler.stop(info=info) def trace(name, info=None, hide_args=False, hide_result=True, allow_multiple_trace=True): """Trace decorator for functions. 
Very useful if you would like to add trace point on existing function: >> @profiler.trace("my_point") >> def my_func(self, some_args): >> #code :param name: The name of action. E.g. wsgi, rpc, db, etc.. :param info: Dictionary with extra trace information. For example in wsgi it can be url, in rpc - message or in db sql - request. :param hide_args: Don't push to trace info args and kwargs. Quite useful if you have some info in args that you wont to share, e.g. passwords. :param hide_result: Boolean value to hide/show function result in trace. True - hide function result (default). False - show function result in trace. :param allow_multiple_trace: If the wrapped function has already been traced either allow the new trace to occur or raise a value error denoting that multiple tracing is not allowed (by default allow). """ if not info: info = {} else: info = info.copy() info["function"] = {} def decorator(f): trace_times = getattr(f, "__traced__", 0) if not allow_multiple_trace and trace_times: raise ValueError("Function '%s' has already" " been traced %s times" % (f, trace_times)) try: f.__traced__ = trace_times + 1 except AttributeError: # Tries to work around the following: # # AttributeError: 'instancemethod' object has no # attribute '__traced__' try: f.im_func.__traced__ = trace_times + 1 except AttributeError: # nosec pass @functools.wraps(f) def wrapper(*args, **kwargs): # NOTE(tovin07): Workaround for this issue # F823 local variable 'info' # (defined in enclosing scope on line xxx) # referenced before assignment info_ = info if "name" not in info_["function"]: # Get this once (as it should **not** be changing in # subsequent calls). info_["function"]["name"] = reflection.get_callable_name(f) if not hide_args: info_["function"]["args"] = str(args) info_["function"]["kwargs"] = str(kwargs) stop_info = None try: start(name, info=info_) result = f(*args, **kwargs) except Exception as ex: stop_info = {"etype": reflection.get_class_name(ex)} raise else: if not hide_result: stop_info = {"function": {"result": repr(result)}} return result finally: if stop_info: stop(info=stop_info) else: stop() return wrapper return decorator def trace_cls(name, info=None, hide_args=False, hide_result=True, trace_private=False, allow_multiple_trace=True, trace_class_methods=False, trace_static_methods=False): """Trace decorator for instances of class . Very useful if you would like to add trace point on existing method: >> @profiler.trace_cls("rpc") >> RpcManagerClass(object): >> >> def my_method(self, some_args): >> pass >> >> def my_method2(self, some_arg1, some_arg2, kw=None, kw2=None) >> pass >> :param name: The name of action. E.g. wsgi, rpc, db, etc.. :param info: Dictionary with extra trace information. For example in wsgi it can be url, in rpc - message or in db sql - request. :param hide_args: Don't push to trace info args and kwargs. Quite useful if you have some info in args that you wont to share, e.g. passwords. :param hide_result: Boolean value to hide/show function result in trace. True - hide function result (default). False - show function result in trace. :param trace_private: Trace methods that starts with "_". It wont trace methods that starts "__" even if it is turned on. :param trace_static_methods: Trace staticmethods. This may be prone to issues so careful usage is recommended (this is also why this defaults to false). :param trace_class_methods: Trace classmethods. This may be prone to issues so careful usage is recommended (this is also why this defaults to false). 
:param allow_multiple_trace: If wrapped attributes have already been traced either allow the new trace to occur or raise a value error denoting that multiple tracing is not allowed (by default allow). """ def trace_checker(attr_name, to_be_wrapped): if attr_name.startswith("__"): # Never trace really private methods. return (False, None) if not trace_private and attr_name.startswith("_"): return (False, None) if isinstance(to_be_wrapped, staticmethod): if not trace_static_methods: return (False, None) return (True, staticmethod) if isinstance(to_be_wrapped, classmethod): if not trace_class_methods: return (False, None) return (True, classmethod) return (True, None) def decorator(cls): clss = cls if inspect.isclass(cls) else cls.__class__ mro_dicts = [c.__dict__ for c in inspect.getmro(clss)] traceable_attrs = [] traceable_wrappers = [] for attr_name, attr in inspect.getmembers(cls): if not (inspect.ismethod(attr) or inspect.isfunction(attr)): continue wrapped_obj = None for cls_dict in mro_dicts: if attr_name in cls_dict: wrapped_obj = cls_dict[attr_name] break should_wrap, wrapper = trace_checker(attr_name, wrapped_obj) if not should_wrap: continue traceable_attrs.append((attr_name, attr)) traceable_wrappers.append(wrapper) if not allow_multiple_trace: # Check before doing any other further work (so we don't # halfway trace this class). _ensure_no_multiple_traced(traceable_attrs) for i, (attr_name, attr) in enumerate(traceable_attrs): wrapped_method = trace(name, info=info, hide_args=hide_args, hide_result=hide_result)(attr) wrapper = traceable_wrappers[i] if wrapper is not None: wrapped_method = wrapper(wrapped_method) setattr(cls, attr_name, wrapped_method) return cls return decorator class TracedMeta(type): """Metaclass to comfortably trace all children of a specific class. Possible usage: >>> @six.add_metaclass(profiler.TracedMeta) >>> class RpcManagerClass(object): >>> __trace_args__ = {'name': 'rpc', >>> 'info': None, >>> 'hide_args': False, >>> 'hide_result': True, >>> 'trace_private': False} >>> >>> def my_method(self, some_args): >>> pass >>> >>> def my_method2(self, some_arg1, some_arg2, kw=None, kw2=None) >>> pass Adding of this metaclass requires to set __trace_args__ attribute to the class we want to modify. __trace_args__ is the dictionary with one mandatory key included - "name", that will define name of action to be traced - E.g. wsgi, rpc, db, etc... """ def __init__(cls, cls_name, bases, attrs): super(TracedMeta, cls).__init__(cls_name, bases, attrs) trace_args = dict(getattr(cls, "__trace_args__", {})) trace_private = trace_args.pop("trace_private", False) allow_multiple_trace = trace_args.pop("allow_multiple_trace", True) if "name" not in trace_args: raise TypeError("Please specify __trace_args__ class level " "dictionary attribute with mandatory 'name' key - " "e.g. __trace_args__ = {'name': 'rpc'}") traceable_attrs = [] for attr_name, attr_value in attrs.items(): if not (inspect.ismethod(attr_value) or inspect.isfunction(attr_value)): continue if attr_name.startswith("__"): continue if not trace_private and attr_name.startswith("_"): continue traceable_attrs.append((attr_name, attr_value)) if not allow_multiple_trace: # Check before doing any other further work (so we don't # halfway trace this class). 
_ensure_no_multiple_traced(traceable_attrs) for attr_name, attr_value in traceable_attrs: setattr(cls, attr_name, trace(**trace_args)(getattr(cls, attr_name))) class Trace(object): def __init__(self, name, info=None): """With statement way to use profiler start()/stop(). >> with profiler.Trace("rpc", info={"any": "values"}) >> some code instead of >> profiler.start() >> try: >> your code >> finally: profiler.stop() """ self._name = name self._info = info def __enter__(self): start(self._name, info=self._info) def __exit__(self, etype, value, traceback): if etype: info = {"etype": reflection.get_class_name(etype)} stop(info=info) else: stop() class _Profiler(object): def __init__(self, hmac_key, base_id=None, parent_id=None): self.hmac_key = hmac_key if not base_id: base_id = str(uuidutils.generate_uuid()) self._trace_stack = collections.deque([base_id, parent_id or base_id]) self._name = collections.deque() self._host = socket.gethostname() def get_base_id(self): """Return base id of a trace. Base id is the same for all elements in one trace. It's main goal is to be able to retrieve by one request all trace elements from storage. """ return self._trace_stack[0] def get_parent_id(self): """Returns parent trace element id.""" return self._trace_stack[-2] def get_id(self): """Returns current trace element id.""" return self._trace_stack[-1] def start(self, name, info=None): """Start new event. Adds new trace_id to trace stack and sends notification to collector (may be ceilometer). With "info" and 3 ids: base_id - to be able to retrieve all trace elements by one query parent_id - to build tree of events (not just a list) trace_id - current event id. :param name: name of trace element (db, wsgi, rpc, etc..) :param info: Dictionary with any useful information related to this trace element. (sql request, rpc message or url...) """ info = info or {} info["host"] = self._host self._name.append(name) self._trace_stack.append(str(uuidutils.generate_uuid())) self._notify("%s-start" % name, info) def stop(self, info=None): """Finish latest event. Same as a start, but instead of pushing trace_id to stack it pops it. :param info: Dict with useful info. It will be send in notification. """ info = info or {} info["host"] = self._host self._notify("%s-stop" % self._name.pop(), info) self._trace_stack.pop() def _notify(self, name, info): payload = { "name": name, "base_id": self.get_base_id(), "trace_id": self.get_id(), "parent_id": self.get_parent_id(), "timestamp": datetime.datetime.utcnow().strftime( "%Y-%m-%dT%H:%M:%S.%f"), } if info: payload["info"] = info notifier.notify(payload) osprofiler-1.15.2/osprofiler/cmd/0000775000175100017510000000000013241120161016726 5ustar zuulzuul00000000000000osprofiler-1.15.2/osprofiler/cmd/template.html0000666000175100017510000003316413241117762021455 0ustar zuulzuul00000000000000
[template.html: HTML trace report template reduced to text; the report table columns are Levels, Duration, Type, Project, Service, Host, Details]
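The profiler module above exposes several entry points: init(), start()/stop(), the trace() decorator, the trace_cls() class decorator, the TracedMeta metaclass and the Trace context manager. A minimal usage sketch, assuming a notifier has already been configured elsewhere (for example via osprofiler.initializer.init_from_conf(), as the functional test further below does); the HMAC key and trace names are placeholders:

from osprofiler import profiler

profiler.init("SECRET_KEY")                    # enable tracing in this thread

@profiler.trace("db", hide_args=True)          # decorator form
def query(statement):
    return statement.upper()

with profiler.Trace("rpc", info={"any": "values"}):   # context-manager form
    query("select 1")

# Every trace point above shares one base id; it is what the CLI later uses
# to fetch the report from the configured driver.
print(profiler.get().get_base_id())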
osprofiler-1.15.2/osprofiler/cmd/__init__.py0000666000175100017510000000000013241117762021044 0ustar zuulzuul00000000000000osprofiler-1.15.2/osprofiler/cmd/cliutils.py0000666000175100017510000000330313241117762021146 0ustar zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os def env(*args, **kwargs): """Returns the first environment variable set. If all are empty, defaults to '' or keyword arg `default`. """ for arg in args: value = os.environ.get(arg) if value: return value return kwargs.get("default", "") def arg(*args, **kwargs): """Decorator for CLI args. Example: >>> @arg("name", help="Name of the new entity") ... def entity_create(args): ... pass """ def _decorator(func): add_arg(func, *args, **kwargs) return func return _decorator def add_arg(func, *args, **kwargs): """Bind CLI arguments to a shell.py `do_foo` function.""" if not hasattr(func, "arguments"): func.arguments = [] # NOTE(sirp): avoid dups that can occur when the module is shared across # tests. if (args, kwargs) not in func.arguments: # Because of the semantics of decorator composition if we just append # to the options list positional options will appear to be backwards. func.arguments.insert(0, (args, kwargs)) osprofiler-1.15.2/osprofiler/cmd/commands.py0000666000175100017510000001427713241120010021107 0ustar zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import os from oslo_utils import uuidutils from osprofiler.cmd import cliutils from osprofiler.drivers import base from osprofiler import exc class BaseCommand(object): group_name = None class TraceCommands(BaseCommand): group_name = "trace" @cliutils.arg("trace", help="File with trace or trace id") @cliutils.arg("--connection-string", dest="conn_str", default=(cliutils.env("OSPROFILER_CONNECTION_STRING") or "ceilometer://"), help="Storage driver's connection string. Defaults to " "env[OSPROFILER_CONNECTION_STRING] if set, else " "ceilometer://") @cliutils.arg("--transport-url", dest="transport_url", help="Oslo.messaging transport URL (for messaging:// driver " "only), e.g. 
rabbit://user:password@host:5672/") @cliutils.arg("--idle-timeout", dest="idle_timeout", type=int, default=1, help="How long to wait for the trace to finish, in seconds " "(for messaging:// driver only)") @cliutils.arg("--json", dest="use_json", action="store_true", help="show trace in JSON") @cliutils.arg("--html", dest="use_html", action="store_true", help="show trace in HTML") @cliutils.arg("--local-libs", dest="local_libs", action="store_true", help="use local static files of html in /libs/") @cliutils.arg("--dot", dest="use_dot", action="store_true", help="show trace in DOT language") @cliutils.arg("--render-dot", dest="render_dot_filename", help="filename for rendering the dot graph in pdf format") @cliutils.arg("--out", dest="file_name", help="save output in file") def show(self, args): """Display trace results in HTML, JSON or DOT format.""" trace = None if not uuidutils.is_uuid_like(args.trace): trace = json.load(open(args.trace)) else: try: engine = base.get_driver(args.conn_str, **args.__dict__) except Exception as e: raise exc.CommandError(e.message) trace = engine.get_report(args.trace) if not trace or not trace.get("children"): msg = ("Trace with UUID %s not found. Please check the HMAC key " "used in the command." % args.trace) raise exc.CommandError(msg) # NOTE(ayelistratov): Ceilometer translates datetime objects to # strings, other drivers store this data in ISO Date format. # Since datetime.datetime is not JSON serializable by default, # this method will handle that. def datetime_json_serialize(obj): if hasattr(obj, "isoformat"): return obj.isoformat() else: return obj if args.use_json: output = json.dumps(trace, default=datetime_json_serialize, separators=(",", ": "), indent=2) elif args.use_html: with open(os.path.join(os.path.dirname(__file__), "template.html")) as html_template: output = html_template.read().replace( "$DATA", json.dumps(trace, indent=4, separators=(",", ": "), default=datetime_json_serialize)) if args.local_libs: output = output.replace("$LOCAL", "true") else: output = output.replace("$LOCAL", "false") elif args.use_dot: dot_graph = self._create_dot_graph(trace) output = dot_graph.source if args.render_dot_filename: dot_graph.render(args.render_dot_filename, cleanup=True) else: raise exc.CommandError("You should choose one of the following " "output formats: json, html or dot.") if args.file_name: with open(args.file_name, "w+") as output_file: output_file.write(output) else: print(output) def _create_dot_graph(self, trace): try: import graphviz except ImportError: raise exc.CommandError( "graphviz library is required to use this option.") dot = graphviz.Digraph(format="pdf") next_id = [0] def _create_node(info): time_taken = info["finished"] - info["started"] service = info["service"] + ":" if "service" in info else "" name = info["name"] label = "%s%s - %d ms" % (service, name, time_taken) if name == "wsgi": req = info["meta.raw_payload.wsgi-start"]["info"]["request"] label = "%s\\n%s %s.." 
% (label, req["method"], req["path"][:30]) elif name == "rpc" or name == "driver": raw = info["meta.raw_payload.%s-start" % name] fn_name = raw["info"]["function"]["name"] label = "%s\\n%s" % (label, fn_name.split(".")[-1]) node_id = str(next_id[0]) next_id[0] += 1 dot.node(node_id, label) return node_id def _create_sub_graph(root): rid = _create_node(root["info"]) for child in root["children"]: cid = _create_sub_graph(child) dot.edge(rid, cid) return rid _create_sub_graph(trace) return dot osprofiler-1.15.2/osprofiler/cmd/shell.py0000666000175100017510000002202313241120010020401 0ustar zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Command-line interface to the OpenStack Profiler. """ import argparse import inspect import sys from oslo_config import cfg import osprofiler from osprofiler.cmd import cliutils from osprofiler.cmd import commands from osprofiler import exc from osprofiler import opts class OSProfilerShell(object): def __init__(self, argv): args = self._get_base_parser().parse_args(argv) opts.set_defaults(cfg.CONF) args.func(args) def _get_base_parser(self): parser = argparse.ArgumentParser( prog="osprofiler", description=__doc__.strip(), add_help=True ) parser.add_argument("-v", "--version", action="version", version=osprofiler.__version__) self._append_ceilometer_args(parser) self._append_identity_args(parser) self._append_subcommands(parser) return parser def _append_ceilometer_args(self, parent_parser): parser = parent_parser.add_argument_group("ceilometer") parser.add_argument( "--ceilometer-url", default=cliutils.env("CEILOMETER_URL"), help="Defaults to env[CEILOMETER_URL].") parser.add_argument( "--ceilometer-api-version", default=cliutils.env("CEILOMETER_API_VERSION", default="2"), help="Defaults to env[CEILOMETER_API_VERSION] or 2.") def _append_identity_args(self, parent_parser): # FIXME(fabgia): identity related parameters should be passed by the # Keystone client itself to avoid constant update in all the services # clients. When this fix is merged this method can be made obsolete. # Bug: https://bugs.launchpad.net/python-keystoneclient/+bug/1332337 parser = parent_parser.add_argument_group("identity") parser.add_argument("-k", "--insecure", default=False, action="store_true", help="Explicitly allow osprofiler to " "perform \"insecure\" SSL (https) requests. " "The server's certificate will " "not be verified against any certificate " "authorities. 
This option should be used with " "caution.") # User related options parser.add_argument("--os-username", default=cliutils.env("OS_USERNAME"), help="Defaults to env[OS_USERNAME].") parser.add_argument("--os-user-id", default=cliutils.env("OS_USER_ID"), help="Defaults to env[OS_USER_ID].") parser.add_argument("--os-password", default=cliutils.env("OS_PASSWORD"), help="Defaults to env[OS_PASSWORD].") # Domain related options parser.add_argument("--os-user-domain-id", default=cliutils.env("OS_USER_DOMAIN_ID"), help="Defaults to env[OS_USER_DOMAIN_ID].") parser.add_argument("--os-user-domain-name", default=cliutils.env("OS_USER_DOMAIN_NAME"), help="Defaults to env[OS_USER_DOMAIN_NAME].") parser.add_argument("--os-project-domain-id", default=cliutils.env("OS_PROJECT_DOMAIN_ID"), help="Defaults to env[OS_PROJECT_DOMAIN_ID].") parser.add_argument("--os-project-domain-name", default=cliutils.env("OS_PROJECT_DOMAIN_NAME"), help="Defaults to env[OS_PROJECT_DOMAIN_NAME].") # Project V3 or Tenant V2 related options parser.add_argument("--os-project-id", default=cliutils.env("OS_PROJECT_ID"), help="Another way to specify tenant ID. " "This option is mutually exclusive with " " --os-tenant-id. " "Defaults to env[OS_PROJECT_ID].") parser.add_argument("--os-project-name", default=cliutils.env("OS_PROJECT_NAME"), help="Another way to specify tenant name. " "This option is mutually exclusive with " " --os-tenant-name. " "Defaults to env[OS_PROJECT_NAME].") parser.add_argument("--os-tenant-id", default=cliutils.env("OS_TENANT_ID"), help="This option is mutually exclusive with " " --os-project-id. " "Defaults to env[OS_PROJECT_ID].") parser.add_argument("--os-tenant-name", default=cliutils.env("OS_TENANT_NAME"), help="Defaults to env[OS_TENANT_NAME].") # Auth related options parser.add_argument("--os-auth-url", default=cliutils.env("OS_AUTH_URL"), help="Defaults to env[OS_AUTH_URL].") parser.add_argument("--os-auth-token", default=cliutils.env("OS_AUTH_TOKEN"), help="Defaults to env[OS_AUTH_TOKEN].") parser.add_argument("--os-cacert", metavar="", dest="os_cacert", default=cliutils.env("OS_CACERT"), help="Path of CA TLS certificate(s) used to verify" " the remote server\"s certificate. Without this " "option ceilometer looks for the default system CA" " certificates.") parser.add_argument("--os-cert", help="Path of certificate file to use in SSL " "connection. This file can optionally be " "prepended with the private key.") parser.add_argument("--os-key", help="Path of client key to use in SSL " "connection. 
This option is not necessary " "if your key is prepended to your cert file.") # Service Catalog related options parser.add_argument("--os-service-type", default=cliutils.env("OS_SERVICE_TYPE"), help="Defaults to env[OS_SERVICE_TYPE].") parser.add_argument("--os-endpoint-type", default=cliutils.env("OS_ENDPOINT_TYPE"), help="Defaults to env[OS_ENDPOINT_TYPE].") parser.add_argument("--os-region-name", default=cliutils.env("OS_REGION_NAME"), help="Defaults to env[OS_REGION_NAME].") def _append_subcommands(self, parent_parser): subcommands = parent_parser.add_subparsers(help="") for group_cls in commands.BaseCommand.__subclasses__(): group_parser = subcommands.add_parser(group_cls.group_name) subcommand_parser = group_parser.add_subparsers() for name, callback in inspect.getmembers( group_cls(), predicate=inspect.ismethod): command = name.replace("_", "-") desc = callback.__doc__ or "" help_message = desc.strip().split("\n")[0] arguments = getattr(callback, "arguments", []) command_parser = subcommand_parser.add_parser( command, help=help_message, description=desc) for (args, kwargs) in arguments: command_parser.add_argument(*args, **kwargs) command_parser.set_defaults(func=callback) def _no_project_and_domain_set(self, args): if not (args.os_project_id or (args.os_project_name and (args.os_user_domain_name or args.os_user_domain_id)) or (args.os_tenant_id or args.os_tenant_name)): return True else: return False def main(args=None): if args is None: args = sys.argv[1:] try: OSProfilerShell(args) except exc.CommandError as e: print(e.message) return 1 if __name__ == "__main__": main() osprofiler-1.15.2/osprofiler/tests/0000775000175100017510000000000013241120161017325 5ustar zuulzuul00000000000000osprofiler-1.15.2/osprofiler/tests/__init__.py0000666000175100017510000000000013241117762021443 0ustar zuulzuul00000000000000osprofiler-1.15.2/osprofiler/tests/functional/0000775000175100017510000000000013241120161021467 5ustar zuulzuul00000000000000osprofiler-1.15.2/osprofiler/tests/functional/config.cfg0000666000175100017510000000013613241117762023434 0ustar zuulzuul00000000000000[DEFAULT] transport_url=rabbit://localhost:5672/ [profiler] connection_string="messaging://" osprofiler-1.15.2/osprofiler/tests/functional/__init__.py0000666000175100017510000000000013241117762023605 0ustar zuulzuul00000000000000osprofiler-1.15.2/osprofiler/tests/functional/test_driver.py0000666000175100017510000001032613241120010024370 0ustar zuulzuul00000000000000# Copyright (c) 2016 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
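# NOTE: an illustrative sketch, not part of the original module. The test
# case below exercises the same report path that the "trace show" command in
# osprofiler/cmd/commands.py follows: resolve a driver from the connection
# string and ask it for a report by base id. The connection string and the
# base id used here are placeholders:
#
#     from osprofiler.drivers import base
#
#     engine = base.get_driver("redis://localhost:6379", project="project",
#                              service="service", host="host")
#     report = engine.get_report("<base-id>")
#     print(report["stats"])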
import logging import os from oslo_config import cfg from osprofiler.drivers import base from osprofiler import initializer from osprofiler import opts from osprofiler import profiler from osprofiler.tests import test CONF = cfg.CONF LOG = logging.getLogger(__name__) @profiler.trace_cls("rpc", hide_args=True) class Foo(object): def bar(self, x): return self.baz(x, x) def baz(self, x, y): return x * y class DriverTestCase(test.FunctionalTestCase): SERVICE = "service" PROJECT = "project" def setUp(self): super(DriverTestCase, self).setUp() CONF(["--config-file", os.path.dirname(__file__) + "/config.cfg"]) opts.set_defaults(CONF, enabled=True, trace_sqlalchemy=False, hmac_keys="SECRET_KEY") def _assert_dict(self, info, **kwargs): for key in kwargs: self.assertEqual(kwargs[key], info[key]) def _assert_child_dict(self, child, base_id, parent_id, name, fn_name): self.assertEqual(parent_id, child["parent_id"]) exp_info = {"name": "rpc", "service": self.SERVICE, "project": self.PROJECT} self._assert_dict(child["info"], **exp_info) raw_start = child["info"]["meta.raw_payload.%s-start" % name] self.assertEqual(fn_name, raw_start["info"]["function"]["name"]) exp_raw = {"name": "%s-start" % name, "service": self.SERVICE, "trace_id": child["trace_id"], "project": self.PROJECT, "base_id": base_id} self._assert_dict(raw_start, **exp_raw) raw_stop = child["info"]["meta.raw_payload.%s-stop" % name] exp_raw["name"] = "%s-stop" % name self._assert_dict(raw_stop, **exp_raw) def test_get_report(self): # initialize profiler notifier (the same way as in services) initializer.init_from_conf( CONF, {}, self.PROJECT, self.SERVICE, "host") profiler.init("SECRET_KEY") # grab base_id base_id = profiler.get().get_base_id() # execute profiled code foo = Foo() foo.bar(1) # instantiate report engine (the same way as in osprofiler CLI) engine = base.get_driver(CONF.profiler.connection_string, project=self.PROJECT, service=self.SERVICE, host="host", conf=CONF) # generate the report report = engine.get_report(base_id) LOG.debug("OSProfiler report: %s", report) # verify the report self.assertEqual("total", report["info"]["name"]) self.assertEqual(2, report["stats"]["rpc"]["count"]) self.assertEqual(1, len(report["children"])) cbar = report["children"][0] self._assert_child_dict( cbar, base_id, base_id, "rpc", "osprofiler.tests.functional.test_driver.Foo.bar") self.assertEqual(1, len(cbar["children"])) cbaz = cbar["children"][0] self._assert_child_dict( cbaz, base_id, cbar["trace_id"], "rpc", "osprofiler.tests.functional.test_driver.Foo.baz") class RedisDriverTestCase(DriverTestCase): def setUp(self): super(DriverTestCase, self).setUp() CONF([]) opts.set_defaults(CONF, connection_string="redis://localhost:6379", enabled=True, trace_sqlalchemy=False, hmac_keys="SECRET_KEY") osprofiler-1.15.2/osprofiler/tests/unit/0000775000175100017510000000000013241120161020304 5ustar zuulzuul00000000000000osprofiler-1.15.2/osprofiler/tests/unit/test_web.py0000666000175100017510000002653313241117762022522 0ustar zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import mock from webob import response as webob_response from osprofiler import _utils as utils from osprofiler import profiler from osprofiler.tests import test from osprofiler import web def dummy_app(environ, response): res = webob_response.Response() return res(environ, response) class WebTestCase(test.TestCase): def setUp(self): super(WebTestCase, self).setUp() profiler._clean() self.addCleanup(profiler._clean) def test_get_trace_id_headers_no_hmac(self): profiler.init(None, base_id="y", parent_id="z") headers = web.get_trace_id_headers() self.assertEqual(headers, {}) def test_get_trace_id_headers(self): profiler.init("key", base_id="y", parent_id="z") headers = web.get_trace_id_headers() self.assertEqual(sorted(headers.keys()), sorted(["X-Trace-Info", "X-Trace-HMAC"])) trace_info = utils.signed_unpack(headers["X-Trace-Info"], headers["X-Trace-HMAC"], ["key"]) self.assertIn("hmac_key", trace_info) self.assertEqual("key", trace_info.pop("hmac_key")) self.assertEqual({"parent_id": "z", "base_id": "y"}, trace_info) @mock.patch("osprofiler.profiler.get") def test_get_trace_id_headers_no_profiler(self, mock_get_profiler): mock_get_profiler.return_value = False headers = web.get_trace_id_headers() self.assertEqual(headers, {}) class WebMiddlewareTestCase(test.TestCase): def setUp(self): super(WebMiddlewareTestCase, self).setUp() profiler._clean() # it's default state of _ENABLED param, so let's set it here web._ENABLED = None self.addCleanup(profiler._clean) def tearDown(self): web.enable() super(WebMiddlewareTestCase, self).tearDown() def test_factory(self): mock_app = mock.MagicMock() local_conf = {"enabled": True, "hmac_keys": "123"} factory = web.WsgiMiddleware.factory(None, **local_conf) wsgi = factory(mock_app) self.assertEqual(wsgi.application, mock_app) self.assertEqual(wsgi.name, "wsgi") self.assertTrue(wsgi.enabled) self.assertEqual(wsgi.hmac_keys, [local_conf["hmac_keys"]]) def _test_wsgi_middleware_with_invalid_trace(self, headers, hmac_key, mock_profiler_init, enabled=True): request = mock.MagicMock() request.get_response.return_value = "yeah!" 
request.headers = headers middleware = web.WsgiMiddleware("app", hmac_key, enabled=enabled) self.assertEqual("yeah!", middleware(request)) request.get_response.assert_called_once_with("app") self.assertEqual(0, mock_profiler_init.call_count) @mock.patch("osprofiler.web.profiler.init") def test_wsgi_middleware_disabled(self, mock_profiler_init): hmac_key = "secret" pack = utils.signed_pack({"base_id": "1", "parent_id": "2"}, hmac_key) headers = { "a": "1", "b": "2", "X-Trace-Info": pack[0], "X-Trace-HMAC": pack[1] } self._test_wsgi_middleware_with_invalid_trace(headers, hmac_key, mock_profiler_init, enabled=False) @mock.patch("osprofiler.web.profiler.init") def test_wsgi_middleware_no_trace(self, mock_profiler_init): headers = { "a": "1", "b": "2" } self._test_wsgi_middleware_with_invalid_trace(headers, "secret", mock_profiler_init) @mock.patch("osprofiler.web.profiler.init") def test_wsgi_middleware_invalid_trace_headers(self, mock_profiler_init): headers = { "a": "1", "b": "2", "X-Trace-Info": "abbababababa", "X-Trace-HMAC": "abbababababa" } self._test_wsgi_middleware_with_invalid_trace(headers, "secret", mock_profiler_init) @mock.patch("osprofiler.web.profiler.init") def test_wsgi_middleware_no_trace_hmac(self, mock_profiler_init): hmac_key = "secret" pack = utils.signed_pack({"base_id": "1", "parent_id": "2"}, hmac_key) headers = { "a": "1", "b": "2", "X-Trace-Info": pack[0] } self._test_wsgi_middleware_with_invalid_trace(headers, hmac_key, mock_profiler_init) @mock.patch("osprofiler.web.profiler.init") def test_wsgi_middleware_invalid_hmac(self, mock_profiler_init): hmac_key = "secret" pack = utils.signed_pack({"base_id": "1", "parent_id": "2"}, hmac_key) headers = { "a": "1", "b": "2", "X-Trace-Info": pack[0], "X-Trace-HMAC": "not valid hmac" } self._test_wsgi_middleware_with_invalid_trace(headers, hmac_key, mock_profiler_init) @mock.patch("osprofiler.web.profiler.init") def test_wsgi_middleware_invalid_trace_info(self, mock_profiler_init): hmac_key = "secret" pack = utils.signed_pack([{"base_id": "1"}, {"parent_id": "2"}], hmac_key) headers = { "a": "1", "b": "2", "X-Trace-Info": pack[0], "X-Trace-HMAC": pack[1] } self._test_wsgi_middleware_with_invalid_trace(headers, hmac_key, mock_profiler_init) @mock.patch("osprofiler.web.profiler.init") def test_wsgi_middleware_key_passthrough(self, mock_profiler_init): hmac_key = "secret2" request = mock.MagicMock() request.get_response.return_value = "yeah!" request.url = "someurl" request.host_url = "someurl" request.path = "path" request.query_string = "query" request.method = "method" request.scheme = "scheme" pack = utils.signed_pack({"base_id": "1", "parent_id": "2"}, hmac_key) request.headers = { "a": "1", "b": "2", "X-Trace-Info": pack[0], "X-Trace-HMAC": pack[1] } middleware = web.WsgiMiddleware("app", "secret1,%s" % hmac_key, enabled=True) self.assertEqual("yeah!", middleware(request)) mock_profiler_init.assert_called_once_with(hmac_key=hmac_key, base_id="1", parent_id="2") @mock.patch("osprofiler.web.profiler.init") def test_wsgi_middleware_key_passthrough2(self, mock_profiler_init): hmac_key = "secret1" request = mock.MagicMock() request.get_response.return_value = "yeah!" 
request.url = "someurl" request.host_url = "someurl" request.path = "path" request.query_string = "query" request.method = "method" request.scheme = "scheme" pack = utils.signed_pack({"base_id": "1", "parent_id": "2"}, hmac_key) request.headers = { "a": "1", "b": "2", "X-Trace-Info": pack[0], "X-Trace-HMAC": pack[1] } middleware = web.WsgiMiddleware("app", "%s,secret2" % hmac_key, enabled=True) self.assertEqual("yeah!", middleware(request)) mock_profiler_init.assert_called_once_with(hmac_key=hmac_key, base_id="1", parent_id="2") @mock.patch("osprofiler.web.profiler.Trace") @mock.patch("osprofiler.web.profiler.init") def test_wsgi_middleware(self, mock_profiler_init, mock_profiler_trace): hmac_key = "secret" request = mock.MagicMock() request.get_response.return_value = "yeah!" request.url = "someurl" request.host_url = "someurl" request.path = "path" request.query_string = "query" request.method = "method" request.scheme = "scheme" pack = utils.signed_pack({"base_id": "1", "parent_id": "2"}, hmac_key) request.headers = { "a": "1", "b": "2", "X-Trace-Info": pack[0], "X-Trace-HMAC": pack[1] } middleware = web.WsgiMiddleware("app", hmac_key, enabled=True) self.assertEqual("yeah!", middleware(request)) mock_profiler_init.assert_called_once_with(hmac_key=hmac_key, base_id="1", parent_id="2") expected_info = { "request": { "path": request.path, "query": request.query_string, "method": request.method, "scheme": request.scheme } } mock_profiler_trace.assert_called_once_with("wsgi", info=expected_info) @mock.patch("osprofiler.web.profiler.init") def test_wsgi_middleware_disable_via_python(self, mock_profiler_init): request = mock.MagicMock() request.get_response.return_value = "yeah!" web.disable() middleware = web.WsgiMiddleware("app", "hmac_key", enabled=True) self.assertEqual("yeah!", middleware(request)) self.assertEqual(mock_profiler_init.call_count, 0) @mock.patch("osprofiler.web.profiler.init") def test_wsgi_middleware_enable_via_python(self, mock_profiler_init): request = mock.MagicMock() request.get_response.return_value = "yeah!" request.url = "someurl" request.host_url = "someurl" request.path = "path" request.query_string = "query" request.method = "method" request.scheme = "scheme" hmac_key = "super_secret_key2" pack = utils.signed_pack({"base_id": "1", "parent_id": "2"}, hmac_key) request.headers = { "a": "1", "b": "2", "X-Trace-Info": pack[0], "X-Trace-HMAC": pack[1] } web.enable("super_secret_key1,super_secret_key2") middleware = web.WsgiMiddleware("app", enabled=True) self.assertEqual("yeah!", middleware(request)) mock_profiler_init.assert_called_once_with(hmac_key=hmac_key, base_id="1", parent_id="2") def test_disable(self): web.disable() self.assertFalse(web._ENABLED) def test_enabled(self): web.disable() web.enable() self.assertTrue(web._ENABLED) osprofiler-1.15.2/osprofiler/tests/unit/test_utils.py0000666000175100017510000001067313241117762023103 0ustar zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
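# NOTE: an illustrative sketch, not part of the original module, of the HMAC
# round trip that the tests below verify. signed_pack() serializes the payload
# and returns it together with an HMAC signature; signed_unpack() returns the
# payload (plus the matching "hmac_key") only when one of the supplied keys
# verifies the signature, and None otherwise:
#
#     from osprofiler import _utils as utils
#
#     packed, hmac_data = utils.signed_pack({"base_id": "1"}, "secret")
#     data = utils.signed_unpack(packed, hmac_data, ["secret", "other"])
#     assert data["base_id"] == "1" and data["hmac_key"] == "secret"
#     assert utils.signed_unpack(packed, hmac_data, ["wrong"]) is None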
import base64 import hashlib import hmac import mock from osprofiler import _utils as utils from osprofiler.tests import test class UtilsTestCase(test.TestCase): def test_split(self): self.assertEqual([1, 2], utils.split([1, 2])) self.assertEqual(["A", "B"], utils.split("A, B")) self.assertEqual(["A", " B"], utils.split("A, B", strip=False)) def test_split_wrong_type(self): self.assertRaises(TypeError, utils.split, 1) def test_binary_encode_and_decode(self): self.assertEqual("text", utils.binary_decode(utils.binary_encode("text"))) def test_binary_encode_invalid_type(self): self.assertRaises(TypeError, utils.binary_encode, 1234) def test_binary_encode_binary_type(self): binary = utils.binary_encode("text") self.assertEqual(binary, utils.binary_encode(binary)) def test_binary_decode_invalid_type(self): self.assertRaises(TypeError, utils.binary_decode, 1234) def test_binary_decode_text_type(self): self.assertEqual("text", utils.binary_decode("text")) def test_generate_hmac(self): hmac_key = "secrete" data = "my data" h = hmac.new(utils.binary_encode(hmac_key), digestmod=hashlib.sha1) h.update(utils.binary_encode(data)) self.assertEqual(h.hexdigest(), utils.generate_hmac(data, hmac_key)) def test_signed_pack_unpack(self): hmac = "secret" data = {"some": "data"} packed_data, hmac_data = utils.signed_pack(data, hmac) process_data = utils.signed_unpack(packed_data, hmac_data, [hmac]) self.assertIn("hmac_key", process_data) process_data.pop("hmac_key") self.assertEqual(data, process_data) def test_signed_pack_unpack_many_keys(self): keys = ["secret", "secret2", "secret3"] data = {"some": "data"} packed_data, hmac_data = utils.signed_pack(data, keys[-1]) process_data = utils.signed_unpack(packed_data, hmac_data, keys) self.assertEqual(keys[-1], process_data["hmac_key"]) def test_signed_pack_unpack_many_wrong_keys(self): keys = ["secret", "secret2", "secret3"] data = {"some": "data"} packed_data, hmac_data = utils.signed_pack(data, "password") process_data = utils.signed_unpack(packed_data, hmac_data, keys) self.assertIsNone(process_data) def test_signed_unpack_wrong_key(self): data = {"some": "data"} packed_data, hmac_data = utils.signed_pack(data, "secret") self.assertIsNone(utils.signed_unpack(packed_data, hmac_data, "wrong")) def test_signed_unpack_no_key_or_hmac_data(self): data = {"some": "data"} packed_data, hmac_data = utils.signed_pack(data, "secret") self.assertIsNone(utils.signed_unpack(packed_data, hmac_data, None)) self.assertIsNone(utils.signed_unpack(packed_data, None, "secret")) self.assertIsNone(utils.signed_unpack(packed_data, " ", "secret")) @mock.patch("osprofiler._utils.generate_hmac") def test_signed_unpack_generate_hmac_failed(self, mock_generate_hmac): mock_generate_hmac.side_effect = Exception self.assertIsNone(utils.signed_unpack("data", "hmac_data", "hmac_key")) def test_signed_unpack_invalid_json(self): hmac = "secret" data = base64.urlsafe_b64encode(utils.binary_encode("not_a_json")) hmac_data = utils.generate_hmac(data, hmac) self.assertIsNone(utils.signed_unpack(data, hmac_data, hmac)) def test_itersubclasses(self): class A(object): pass class B(A): pass class C(A): pass class D(C): pass self.assertEqual([B, C, D], list(utils.itersubclasses(A))) class E(type): pass self.assertEqual([], list(utils.itersubclasses(E))) osprofiler-1.15.2/osprofiler/tests/unit/__init__.py0000666000175100017510000000000013241117762022422 0ustar zuulzuul00000000000000osprofiler-1.15.2/osprofiler/tests/unit/doc/0000775000175100017510000000000013241120161021051 5ustar 
zuulzuul00000000000000osprofiler-1.15.2/osprofiler/tests/unit/doc/__init__.py0000666000175100017510000000000013241117762023167 0ustar zuulzuul00000000000000osprofiler-1.15.2/osprofiler/tests/unit/doc/test_specs.py0000666000175100017510000001062113241117762023616 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import glob import os import re import docutils.core from osprofiler.tests import test class TitlesTestCase(test.TestCase): specs_path = os.path.join( os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir, "doc", "specs") def _get_title(self, section_tree): section = {"subtitles": []} for node in section_tree: if node.tagname == "title": section["name"] = node.rawsource elif node.tagname == "section": subsection = self._get_title(node) section["subtitles"].append(subsection["name"]) return section def _get_titles(self, spec): titles = {} for node in spec: if node.tagname == "section": # Note subsection subtitles are thrown away section = self._get_title(node) titles[section["name"]] = section["subtitles"] return titles def _check_titles(self, filename, expect, actual): missing_sections = [x for x in expect.keys() if x not in actual.keys()] extra_sections = [x for x in actual.keys() if x not in expect.keys()] msgs = [] if len(missing_sections) > 0: msgs.append("Missing sections: %s" % missing_sections) if len(extra_sections) > 0: msgs.append("Extra sections: %s" % extra_sections) for section in expect.keys(): missing_subsections = [x for x in expect[section] if x not in actual.get(section, {})] # extra subsections are allowed if len(missing_subsections) > 0: msgs.append("Section '%s' is missing subsections: %s" % (section, missing_subsections)) if len(msgs) > 0: self.fail("While checking '%s':\n %s" % (filename, "\n ".join(msgs))) def _check_lines_wrapping(self, tpl, raw): for i, line in enumerate(raw.split("\n")): if "http://" in line or "https://" in line: continue self.assertTrue( len(line) < 80, msg="%s:%d: Line limited to a maximum of 79 characters." % (tpl, i+1)) def _check_no_cr(self, tpl, raw): matches = re.findall("\r", raw) self.assertEqual( len(matches), 0, "Found %s literal carriage returns in file %s" % (len(matches), tpl)) def _check_trailing_spaces(self, tpl, raw): for i, line in enumerate(raw.split("\n")): trailing_spaces = re.findall(" +$", line) self.assertEqual( len(trailing_spaces), 0, "Found trailing spaces on line %s of %s" % (i+1, tpl)) def test_template(self): with open(os.path.join(self.specs_path, "template.rst")) as f: template = f.read() spec = docutils.core.publish_doctree(template) template_titles = self._get_titles(spec) for d in ["implemented", "in-progress"]: spec_dir = "%s/%s" % (self.specs_path, d) self.assertTrue(os.path.isdir(spec_dir), "%s is not a directory" % spec_dir) for filename in glob.glob(spec_dir + "/*"): if filename.endswith("README.rst"): continue self.assertTrue( filename.endswith(".rst"), "spec's file must have .rst ext. 
Found: %s" % filename) with open(filename) as f: data = f.read() titles = self._get_titles(docutils.core.publish_doctree(data)) self._check_titles(filename, template_titles, titles) self._check_lines_wrapping(filename, data) self._check_no_cr(filename, data) self._check_trailing_spaces(filename, data) osprofiler-1.15.2/osprofiler/tests/unit/test_profiler.py0000666000175100017510000005012613241117762023562 0ustar zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections import copy import datetime import re import mock import six from osprofiler import profiler from osprofiler.tests import test class ProfilerGlobMethodsTestCase(test.TestCase): def test_get_profiler_not_inited(self): profiler._clean() self.assertIsNone(profiler.get()) def test_get_profiler_and_init(self): p = profiler.init("secret", base_id="1", parent_id="2") self.assertEqual(profiler.get(), p) self.assertEqual(p.get_base_id(), "1") # NOTE(boris-42): until we make first start we don't have self.assertEqual(p.get_id(), "2") def test_start_not_inited(self): profiler._clean() profiler.start("name") def test_start(self): p = profiler.init("secret", base_id="1", parent_id="2") p.start = mock.MagicMock() profiler.start("name", info="info") p.start.assert_called_once_with("name", info="info") def test_stop_not_inited(self): profiler._clean() profiler.stop() def test_stop(self): p = profiler.init("secret", base_id="1", parent_id="2") p.stop = mock.MagicMock() profiler.stop(info="info") p.stop.assert_called_once_with(info="info") class ProfilerTestCase(test.TestCase): def test_profiler_get_base_id(self): prof = profiler._Profiler("secret", base_id="1", parent_id="2") self.assertEqual(prof.get_base_id(), "1") @mock.patch("osprofiler.profiler.uuidutils.generate_uuid") def test_profiler_get_parent_id(self, mock_generate_uuid): mock_generate_uuid.return_value = "42" prof = profiler._Profiler("secret", base_id="1", parent_id="2") prof.start("test") self.assertEqual(prof.get_parent_id(), "2") @mock.patch("osprofiler.profiler.uuidutils.generate_uuid") def test_profiler_get_base_id_unset_case(self, mock_generate_uuid): mock_generate_uuid.return_value = "42" prof = profiler._Profiler("secret") self.assertEqual(prof.get_base_id(), "42") self.assertEqual(prof.get_parent_id(), "42") @mock.patch("osprofiler.profiler.uuidutils.generate_uuid") def test_profiler_get_id(self, mock_generate_uuid): mock_generate_uuid.return_value = "43" prof = profiler._Profiler("secret") prof.start("test") self.assertEqual(prof.get_id(), "43") @mock.patch("osprofiler.profiler.datetime") @mock.patch("osprofiler.profiler.uuidutils.generate_uuid") @mock.patch("osprofiler.profiler.notifier.notify") def test_profiler_start(self, mock_notify, mock_generate_uuid, mock_datetime): mock_generate_uuid.return_value = "44" now = datetime.datetime.utcnow() mock_datetime.datetime.utcnow.return_value = now info = {"some": "info"} payload = { "name": "test-start", "base_id": "1", "parent_id": "2", "trace_id": "44", 
"info": info, "timestamp": now.strftime("%Y-%m-%dT%H:%M:%S.%f"), } prof = profiler._Profiler("secret", base_id="1", parent_id="2") prof.start("test", info=info) mock_notify.assert_called_once_with(payload) @mock.patch("osprofiler.profiler.datetime") @mock.patch("osprofiler.profiler.notifier.notify") def test_profiler_stop(self, mock_notify, mock_datetime): now = datetime.datetime.utcnow() mock_datetime.datetime.utcnow.return_value = now prof = profiler._Profiler("secret", base_id="1", parent_id="2") prof._trace_stack.append("44") prof._name.append("abc") info = {"some": "info"} prof.stop(info=info) payload = { "name": "abc-stop", "base_id": "1", "parent_id": "2", "trace_id": "44", "info": info, "timestamp": now.strftime("%Y-%m-%dT%H:%M:%S.%f"), } mock_notify.assert_called_once_with(payload) self.assertEqual(len(prof._name), 0) self.assertEqual(prof._trace_stack, collections.deque(["1", "2"])) def test_profiler_hmac(self): hmac = "secret" prof = profiler._Profiler(hmac, base_id="1", parent_id="2") self.assertEqual(hmac, prof.hmac_key) class WithTraceTestCase(test.TestCase): @mock.patch("osprofiler.profiler.stop") @mock.patch("osprofiler.profiler.start") def test_with_trace(self, mock_start, mock_stop): with profiler.Trace("a", info="a1"): mock_start.assert_called_once_with("a", info="a1") mock_start.reset_mock() with profiler.Trace("b", info="b1"): mock_start.assert_called_once_with("b", info="b1") mock_stop.assert_called_once_with() mock_stop.reset_mock() mock_stop.assert_called_once_with() @mock.patch("osprofiler.profiler.stop") @mock.patch("osprofiler.profiler.start") def test_with_trace_etype(self, mock_start, mock_stop): def foo(): with profiler.Trace("foo"): raise ValueError("bar") self.assertRaises(ValueError, foo) mock_start.assert_called_once_with("foo", info=None) mock_stop.assert_called_once_with(info={"etype": "ValueError"}) @profiler.trace("function", info={"info": "some_info"}) def traced_func(i): return i @profiler.trace("hide_args", hide_args=True) def trace_hide_args_func(a, i=10): return (a, i) @profiler.trace("foo", hide_args=True) def test_fn_exc(): raise ValueError() @profiler.trace("hide_result", hide_result=False) def trace_with_result_func(a, i=10): return (a, i) class TraceDecoratorTestCase(test.TestCase): @mock.patch("osprofiler.profiler.stop") @mock.patch("osprofiler.profiler.start") def test_duplicate_trace_disallow(self, mock_start, mock_stop): @profiler.trace("test") def trace_me(): pass self.assertRaises( ValueError, profiler.trace("test-again", allow_multiple_trace=False), trace_me) @mock.patch("osprofiler.profiler.stop") @mock.patch("osprofiler.profiler.start") def test_with_args(self, mock_start, mock_stop): self.assertEqual(1, traced_func(1)) expected_info = { "info": "some_info", "function": { "name": "osprofiler.tests.unit.test_profiler.traced_func", "args": str((1,)), "kwargs": str({}) } } mock_start.assert_called_once_with("function", info=expected_info) mock_stop.assert_called_once_with() @mock.patch("osprofiler.profiler.stop") @mock.patch("osprofiler.profiler.start") def test_without_args(self, mock_start, mock_stop): self.assertEqual((1, 2), trace_hide_args_func(1, i=2)) expected_info = { "function": { "name": "osprofiler.tests.unit.test_profiler" ".trace_hide_args_func" } } mock_start.assert_called_once_with("hide_args", info=expected_info) mock_stop.assert_called_once_with() @mock.patch("osprofiler.profiler.stop") @mock.patch("osprofiler.profiler.start") def test_with_exception(self, mock_start, mock_stop): self.assertRaises(ValueError, 
test_fn_exc) expected_info = { "function": { "name": "osprofiler.tests.unit.test_profiler.test_fn_exc" } } expected_stop_info = {"etype": "ValueError"} mock_start.assert_called_once_with("foo", info=expected_info) mock_stop.assert_called_once_with(info=expected_stop_info) @mock.patch("osprofiler.profiler.stop") @mock.patch("osprofiler.profiler.start") def test_with_result(self, mock_start, mock_stop): self.assertEqual((1, 2), trace_with_result_func(1, i=2)) start_info = { "function": { "name": "osprofiler.tests.unit.test_profiler" ".trace_with_result_func", "args": str((1,)), "kwargs": str({"i": 2}) } } stop_info = { "function": { "result": str((1, 2)) } } mock_start.assert_called_once_with("hide_result", info=start_info) mock_stop.assert_called_once_with(info=stop_info) class FakeTracedCls(object): def method1(self, a, b, c=10): return a + b + c def method2(self, d, e): return d - e def method3(self, g=10, h=20): return g * h def _method(self, i): return i @profiler.trace_cls("rpc", info={"a": 10}) class FakeTraceClassWithInfo(FakeTracedCls): pass @profiler.trace_cls("a", info={"b": 20}, hide_args=True) class FakeTraceClassHideArgs(FakeTracedCls): pass @profiler.trace_cls("rpc", trace_private=True) class FakeTracePrivate(FakeTracedCls): pass class FakeTraceStaticMethodBase(FakeTracedCls): @staticmethod def static_method(arg): return arg @profiler.trace_cls("rpc", trace_static_methods=True) class FakeTraceStaticMethod(FakeTraceStaticMethodBase): pass @profiler.trace_cls("rpc") class FakeTraceStaticMethodSkip(FakeTraceStaticMethodBase): pass class FakeTraceClassMethodBase(FakeTracedCls): @classmethod def class_method(cls, arg): return arg @profiler.trace_cls("rpc") class FakeTraceClassMethodSkip(FakeTraceClassMethodBase): pass def py3_info(info): # NOTE(boris-42): py33 I hate you. info_py3 = copy.deepcopy(info) new_name = re.sub("FakeTrace[^.]*", "FakeTracedCls", info_py3["function"]["name"]) info_py3["function"]["name"] = new_name return info_py3 def possible_mock_calls(name, info): # NOTE(boris-42): py33 I hate you. 
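# Python 2 and Python 3 resolve the qualified name of methods inherited from
# a traced base class differently, so the assertions accept either the name
# reported for the subclass or the FakeTracedCls-based name produced by
# py3_info() above.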
return [mock.call(name, info=info), mock.call(name, info=py3_info(info))] class TraceClsDecoratorTestCase(test.TestCase): @mock.patch("osprofiler.profiler.stop") @mock.patch("osprofiler.profiler.start") def test_args(self, mock_start, mock_stop): fake_cls = FakeTraceClassWithInfo() self.assertEqual(30, fake_cls.method1(5, 15)) expected_info = { "a": 10, "function": { "name": ("osprofiler.tests.unit.test_profiler" ".FakeTraceClassWithInfo.method1"), "args": str((fake_cls, 5, 15)), "kwargs": str({}) } } self.assertEqual(1, len(mock_start.call_args_list)) self.assertIn(mock_start.call_args_list[0], possible_mock_calls("rpc", expected_info)) mock_stop.assert_called_once_with() @mock.patch("osprofiler.profiler.stop") @mock.patch("osprofiler.profiler.start") def test_kwargs(self, mock_start, mock_stop): fake_cls = FakeTraceClassWithInfo() self.assertEqual(50, fake_cls.method3(g=5, h=10)) expected_info = { "a": 10, "function": { "name": ("osprofiler.tests.unit.test_profiler" ".FakeTraceClassWithInfo.method3"), "args": str((fake_cls,)), "kwargs": str({"g": 5, "h": 10}) } } self.assertEqual(1, len(mock_start.call_args_list)) self.assertIn(mock_start.call_args_list[0], possible_mock_calls("rpc", expected_info)) mock_stop.assert_called_once_with() @mock.patch("osprofiler.profiler.stop") @mock.patch("osprofiler.profiler.start") def test_without_private(self, mock_start, mock_stop): fake_cls = FakeTraceClassHideArgs() self.assertEqual(10, fake_cls._method(10)) self.assertFalse(mock_start.called) self.assertFalse(mock_stop.called) @mock.patch("osprofiler.profiler.stop") @mock.patch("osprofiler.profiler.start") def test_without_args(self, mock_start, mock_stop): fake_cls = FakeTraceClassHideArgs() self.assertEqual(40, fake_cls.method1(5, 15, c=20)) expected_info = { "b": 20, "function": { "name": ("osprofiler.tests.unit.test_profiler" ".FakeTraceClassHideArgs.method1"), } } self.assertEqual(1, len(mock_start.call_args_list)) self.assertIn(mock_start.call_args_list[0], possible_mock_calls("a", expected_info)) mock_stop.assert_called_once_with() @mock.patch("osprofiler.profiler.stop") @mock.patch("osprofiler.profiler.start") def test_private_methods(self, mock_start, mock_stop): fake_cls = FakeTracePrivate() self.assertEqual(5, fake_cls._method(5)) expected_info = { "function": { "name": ("osprofiler.tests.unit.test_profiler" ".FakeTracePrivate._method"), "args": str((fake_cls, 5)), "kwargs": str({}) } } self.assertEqual(1, len(mock_start.call_args_list)) self.assertIn(mock_start.call_args_list[0], possible_mock_calls("rpc", expected_info)) mock_stop.assert_called_once_with() @mock.patch("osprofiler.profiler.stop") @mock.patch("osprofiler.profiler.start") @test.testcase.skip( "Static method tracing was disabled due the bug. This test should be " "skipped until we find the way to address it.") def test_static(self, mock_start, mock_stop): fake_cls = FakeTraceStaticMethod() self.assertEqual(25, fake_cls.static_method(25)) expected_info = { "function": { # fixme(boris-42): Static methods are treated differently in # Python 2.x and Python 3.x. 
So in PY2 we # expect to see method4 because method is # static and doesn't have reference to class # - and FakeTraceStatic.method4 in PY3 "name": "osprofiler.tests.unit.test_profiler" ".method4" if six.PY2 else "osprofiler.tests.unit.test_profiler.FakeTraceStatic" ".method4", "args": str((25,)), "kwargs": str({}) } } self.assertEqual(1, len(mock_start.call_args_list)) self.assertIn(mock_start.call_args_list[0], possible_mock_calls("rpc", expected_info)) mock_stop.assert_called_once_with() @mock.patch("osprofiler.profiler.stop") @mock.patch("osprofiler.profiler.start") def test_static_method_skip(self, mock_start, mock_stop): self.assertEqual(25, FakeTraceStaticMethodSkip.static_method(25)) self.assertFalse(mock_start.called) self.assertFalse(mock_stop.called) @mock.patch("osprofiler.profiler.stop") @mock.patch("osprofiler.profiler.start") def test_class_method_skip(self, mock_start, mock_stop): self.assertEqual("foo", FakeTraceClassMethodSkip.class_method("foo")) self.assertFalse(mock_start.called) self.assertFalse(mock_stop.called) @six.add_metaclass(profiler.TracedMeta) class FakeTraceWithMetaclassBase(object): __trace_args__ = {"name": "rpc", "info": {"a": 10}} def method1(self, a, b, c=10): return a + b + c def method2(self, d, e): return d - e def method3(self, g=10, h=20): return g * h def _method(self, i): return i class FakeTraceDummy(FakeTraceWithMetaclassBase): def method4(self, j): return j class FakeTraceWithMetaclassHideArgs(FakeTraceWithMetaclassBase): __trace_args__ = {"name": "a", "info": {"b": 20}, "hide_args": True} def method5(self, k, l): return k + l class FakeTraceWithMetaclassPrivate(FakeTraceWithMetaclassBase): __trace_args__ = {"name": "rpc", "trace_private": True} def _new_private_method(self, m): return 2 * m class TraceWithMetaclassTestCase(test.TestCase): def test_no_name_exception(self): def define_class_with_no_name(): @six.add_metaclass(profiler.TracedMeta) class FakeTraceWithMetaclassNoName(FakeTracedCls): pass self.assertRaises(TypeError, define_class_with_no_name, 1) @mock.patch("osprofiler.profiler.stop") @mock.patch("osprofiler.profiler.start") def test_args(self, mock_start, mock_stop): fake_cls = FakeTraceWithMetaclassBase() self.assertEqual(30, fake_cls.method1(5, 15)) expected_info = { "a": 10, "function": { "name": ("osprofiler.tests.unit.test_profiler" ".FakeTraceWithMetaclassBase.method1"), "args": str((fake_cls, 5, 15)), "kwargs": str({}) } } self.assertEqual(1, len(mock_start.call_args_list)) self.assertIn(mock_start.call_args_list[0], possible_mock_calls("rpc", expected_info)) mock_stop.assert_called_once_with() @mock.patch("osprofiler.profiler.stop") @mock.patch("osprofiler.profiler.start") def test_kwargs(self, mock_start, mock_stop): fake_cls = FakeTraceWithMetaclassBase() self.assertEqual(50, fake_cls.method3(g=5, h=10)) expected_info = { "a": 10, "function": { "name": ("osprofiler.tests.unit.test_profiler" ".FakeTraceWithMetaclassBase.method3"), "args": str((fake_cls,)), "kwargs": str({"g": 5, "h": 10}) } } self.assertEqual(1, len(mock_start.call_args_list)) self.assertIn(mock_start.call_args_list[0], possible_mock_calls("rpc", expected_info)) mock_stop.assert_called_once_with() @mock.patch("osprofiler.profiler.stop") @mock.patch("osprofiler.profiler.start") def test_without_private(self, mock_start, mock_stop): fake_cls = FakeTraceWithMetaclassHideArgs() self.assertEqual(10, fake_cls._method(10)) self.assertFalse(mock_start.called) self.assertFalse(mock_stop.called) @mock.patch("osprofiler.profiler.stop") 
@mock.patch("osprofiler.profiler.start") def test_without_args(self, mock_start, mock_stop): fake_cls = FakeTraceWithMetaclassHideArgs() self.assertEqual(20, fake_cls.method5(5, 15)) expected_info = { "b": 20, "function": { "name": ("osprofiler.tests.unit.test_profiler" ".FakeTraceWithMetaclassHideArgs.method5") } } self.assertEqual(1, len(mock_start.call_args_list)) self.assertIn(mock_start.call_args_list[0], possible_mock_calls("a", expected_info)) mock_stop.assert_called_once_with() @mock.patch("osprofiler.profiler.stop") @mock.patch("osprofiler.profiler.start") def test_private_methods(self, mock_start, mock_stop): fake_cls = FakeTraceWithMetaclassPrivate() self.assertEqual(10, fake_cls._new_private_method(5)) expected_info = { "function": { "name": ("osprofiler.tests.unit.test_profiler" ".FakeTraceWithMetaclassPrivate._new_private_method"), "args": str((fake_cls, 5)), "kwargs": str({}) } } self.assertEqual(1, len(mock_start.call_args_list)) self.assertIn(mock_start.call_args_list[0], possible_mock_calls("rpc", expected_info)) mock_stop.assert_called_once_with() osprofiler-1.15.2/osprofiler/tests/unit/drivers/0000775000175100017510000000000013241120161021762 5ustar zuulzuul00000000000000osprofiler-1.15.2/osprofiler/tests/unit/drivers/test_loginsight.py0000666000175100017510000003121113241117762025557 0ustar zuulzuul00000000000000# Copyright (c) 2016 VMware, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json import ddt import mock from osprofiler.drivers import loginsight from osprofiler import exc from osprofiler.tests import test @ddt.ddt class LogInsightDriverTestCase(test.TestCase): BASE_ID = "8d28af1e-acc0-498c-9890-6908e33eff5f" def setUp(self): super(LogInsightDriverTestCase, self).setUp() self._client = mock.Mock(spec=loginsight.LogInsightClient) self._project = "cinder" self._service = "osapi_volume" self._host = "ubuntu" with mock.patch.object(loginsight, "LogInsightClient", return_value=self._client): self._driver = loginsight.LogInsightDriver( "loginsight://username:password@host", project=self._project, service=self._service, host=self._host) @mock.patch.object(loginsight, "LogInsightClient") def test_init(self, client_class): client = mock.Mock() client_class.return_value = client loginsight.LogInsightDriver("loginsight://username:password@host") client_class.assert_called_once_with("host", "username", "password") client.login.assert_called_once_with() @ddt.data("loginsight://username@host", "loginsight://username:p@ssword@host", "loginsight://us:rname:password@host") def test_init_with_invalid_connection_string(self, conn_str): self.assertRaises(ValueError, loginsight.LogInsightDriver, conn_str) @mock.patch.object(loginsight, "LogInsightClient") def test_init_with_special_chars_in_conn_str(self, client_class): client = mock.Mock() client_class.return_value = client loginsight.LogInsightDriver("loginsight://username:p%40ssword@host") client_class.assert_called_once_with("host", "username", "p@ssword") client.login.assert_called_once_with() def test_get_name(self): self.assertEqual("loginsight", self._driver.get_name()) def _create_trace(self, name, timestamp, parent_id="8d28af1e-acc0-498c-9890-6908e33eff5f", base_id=BASE_ID, trace_id="e465db5c-9672-45a1-b90b-da918f30aef6"): return {"parent_id": parent_id, "name": name, "base_id": base_id, "trace_id": trace_id, "timestamp": timestamp, "info": {"host": self._host}} def _create_start_trace(self): return self._create_trace("wsgi-start", "2016-10-04t11:50:21.902303") def _create_stop_trace(self): return self._create_trace("wsgi-stop", "2016-10-04t11:50:30.123456") @mock.patch("json.dumps") def test_notify(self, dumps): json_str = mock.sentinel.json_str dumps.return_value = json_str trace = self._create_stop_trace() self._driver.notify(trace) trace["project"] = self._project trace["service"] = self._service exp_event = {"text": "OSProfiler trace", "fields": [{"name": "base_id", "content": trace["base_id"]}, {"name": "trace_id", "content": trace["trace_id"]}, {"name": "project", "content": trace["project"]}, {"name": "service", "content": trace["service"]}, {"name": "name", "content": trace["name"]}, {"name": "trace", "content": json_str}] } self._client.send_event.assert_called_once_with(exp_event) @mock.patch.object(loginsight.LogInsightDriver, "_append_results") @mock.patch.object(loginsight.LogInsightDriver, "_parse_results") def test_get_report(self, parse_results, append_results): start_trace = self._create_start_trace() start_trace["project"] = self._project start_trace["service"] = self._service stop_trace = self._create_stop_trace() stop_trace["project"] = self._project stop_trace["service"] = self._service resp = {"events": [{"text": "OSProfiler trace", "fields": [{"name": "trace", "content": json.dumps(start_trace) } ] }, {"text": "OSProfiler trace", "fields": [{"name": "trace", "content": json.dumps(stop_trace) } ] } ] } self._client.query_events = mock.Mock(return_value=resp) 
self._driver.get_report(self.BASE_ID) self._client.query_events.assert_called_once_with({"base_id": self.BASE_ID}) append_results.assert_has_calls( [mock.call(start_trace["trace_id"], start_trace["parent_id"], start_trace["name"], start_trace["project"], start_trace["service"], start_trace["info"]["host"], start_trace["timestamp"], start_trace), mock.call(stop_trace["trace_id"], stop_trace["parent_id"], stop_trace["name"], stop_trace["project"], stop_trace["service"], stop_trace["info"]["host"], stop_trace["timestamp"], stop_trace) ]) parse_results.assert_called_once_with() class LogInsightClientTestCase(test.TestCase): def setUp(self): super(LogInsightClientTestCase, self).setUp() self._host = "localhost" self._username = "username" self._password = "password" self._client = loginsight.LogInsightClient( self._host, self._username, self._password) self._client._session_id = "4ff800d1-3175-4b49-9209-39714ea56416" def test_check_response_login_timeout(self): resp = mock.Mock(status_code=440) self.assertRaises( exc.LogInsightLoginTimeout, self._client._check_response, resp) def test_check_response_api_error(self): resp = mock.Mock(status_code=401, ok=False) resp.text = json.dumps( {"errorMessage": "Invalid username or password.", "errorCode": "FIELD_ERROR"}) e = self.assertRaises( exc.LogInsightAPIError, self._client._check_response, resp) self.assertEqual("Invalid username or password.", str(e)) @mock.patch("requests.Request") @mock.patch("json.dumps") @mock.patch.object(loginsight.LogInsightClient, "_check_response") def test_send_request(self, check_resp, json_dumps, request_class): req = mock.Mock() request_class.return_value = req prep_req = mock.sentinel.prep_req req.prepare = mock.Mock(return_value=prep_req) data = mock.sentinel.data json_dumps.return_value = data self._client._session = mock.Mock() resp = mock.Mock() self._client._session.send = mock.Mock(return_value=resp) resp_json = mock.sentinel.resp_json resp.json = mock.Mock(return_value=resp_json) header = {"X-LI-Session-Id": "foo"} body = mock.sentinel.body params = mock.sentinel.params ret = self._client._send_request( "get", "https", "api/v1/events", header, body, params) self.assertEqual(resp_json, ret) exp_headers = {"X-LI-Session-Id": "foo", "content-type": "application/json"} request_class.assert_called_once_with( "get", "https://localhost:9543/api/v1/events", headers=exp_headers, data=data, params=mock.sentinel.params) self._client._session.send.assert_called_once_with(prep_req, verify=False) check_resp.assert_called_once_with(resp) @mock.patch.object(loginsight.LogInsightClient, "_send_request") def test_is_current_session_active_with_active_session(self, send_request): self.assertTrue(self._client._is_current_session_active()) exp_header = {"X-LI-Session-Id": self._client._session_id} send_request.assert_called_once_with( "get", "https", "api/v1/sessions/current", headers=exp_header) @mock.patch.object(loginsight.LogInsightClient, "_send_request") def test_is_current_session_active_with_expired_session(self, send_request): send_request.side_effect = exc.LogInsightLoginTimeout self.assertFalse(self._client._is_current_session_active()) send_request.assert_called_once_with( "get", "https", "api/v1/sessions/current", headers={"X-LI-Session-Id": self._client._session_id}) @mock.patch.object(loginsight.LogInsightClient, "_is_current_session_active", return_value=True) @mock.patch.object(loginsight.LogInsightClient, "_send_request") def test_login_with_current_session_active(self, send_request, is_current_session_active): 
self._client.login() is_current_session_active.assert_called_once_with() send_request.assert_not_called() @mock.patch.object(loginsight.LogInsightClient, "_is_current_session_active", return_value=False) @mock.patch.object(loginsight.LogInsightClient, "_send_request") def test_login(self, send_request, is_current_session_active): new_session_id = "569a80aa-be5c-49e5-82c1-bb62392d2667" resp = {"sessionId": new_session_id} send_request.return_value = resp self._client.login() is_current_session_active.assert_called_once_with() exp_body = {"username": self._username, "password": self._password} send_request.assert_called_once_with( "post", "https", "api/v1/sessions", body=exp_body) self.assertEqual(new_session_id, self._client._session_id) @mock.patch.object(loginsight.LogInsightClient, "_send_request") def test_send_event(self, send_request): event = mock.sentinel.event self._client.send_event(event) exp_body = {"events": [event]} exp_path = ("api/v1/events/ingest/%s" % self._client.LI_OSPROFILER_AGENT_ID) send_request.assert_called_once_with( "post", "http", exp_path, body=exp_body) @mock.patch.object(loginsight.LogInsightClient, "_send_request") def test_query_events(self, send_request): resp = mock.sentinel.response send_request.return_value = resp self.assertEqual(resp, self._client.query_events({"foo": "bar"})) exp_header = {"X-LI-Session-Id": self._client._session_id} exp_params = {"limit": 20000, "timeout": self._client._query_timeout} send_request.assert_called_once_with( "get", "https", "api/v1/events/foo/CONTAINS+bar/timestamp/GT+0", headers=exp_header, params=exp_params) @mock.patch.object(loginsight.LogInsightClient, "_send_request") @mock.patch.object(loginsight.LogInsightClient, "login") def test_query_events_with_session_expiry(self, login, send_request): resp = mock.sentinel.response send_request.side_effect = [exc.LogInsightLoginTimeout, resp] self.assertEqual(resp, self._client.query_events({"foo": "bar"})) login.assert_called_once_with() exp_header = {"X-LI-Session-Id": self._client._session_id} exp_params = {"limit": 20000, "timeout": self._client._query_timeout} exp_send_request_call = mock.call( "get", "https", "api/v1/events/foo/CONTAINS+bar/timestamp/GT+0", headers=exp_header, params=exp_params) send_request.assert_has_calls([exp_send_request_call]*2) osprofiler-1.15.2/osprofiler/tests/unit/drivers/test_base.py0000666000175100017510000001005113241117762024321 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
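# NotifierBaseTestCase checks the driver factory: concrete drivers register
# themselves via get_name() and are resolved from the connection-string
# scheme (e.g. base.get_driver("a://")), unknown schemes raise ValueError,
# and _build_tree() converts a flat {trace_id: trace} mapping into a nested
# parent/child call tree.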
from osprofiler.drivers import base from osprofiler.tests import test class NotifierBaseTestCase(test.TestCase): def test_factory(self): class A(base.Driver): @classmethod def get_name(cls): return "a" def notify(self, a): return a self.assertEqual(10, base.get_driver("a://").notify(10)) def test_factory_with_args(self): class B(base.Driver): def __init__(self, c_str, a, b=10): self.a = a self.b = b @classmethod def get_name(cls): return "b" def notify(self, c): return self.a + self.b + c self.assertEqual(22, base.get_driver("b://", 5, b=7).notify(10)) def test_driver_not_found(self): self.assertRaises(ValueError, base.get_driver, "Driver not found for connection string: " "nonexisting://") def test_build_empty_tree(self): class C(base.Driver): @classmethod def get_name(cls): return "c" self.assertEqual([], base.get_driver("c://")._build_tree({})) def test_build_complex_tree(self): class D(base.Driver): @classmethod def get_name(cls): return "d" test_input = { "2": {"parent_id": "0", "trace_id": "2", "info": {"started": 1}}, "1": {"parent_id": "0", "trace_id": "1", "info": {"started": 0}}, "21": {"parent_id": "2", "trace_id": "21", "info": {"started": 6}}, "22": {"parent_id": "2", "trace_id": "22", "info": {"started": 7}}, "11": {"parent_id": "1", "trace_id": "11", "info": {"started": 1}}, "113": {"parent_id": "11", "trace_id": "113", "info": {"started": 3}}, "112": {"parent_id": "11", "trace_id": "112", "info": {"started": 2}}, "114": {"parent_id": "11", "trace_id": "114", "info": {"started": 5}} } expected_output = [ { "parent_id": "0", "trace_id": "1", "info": {"started": 0}, "children": [ { "parent_id": "1", "trace_id": "11", "info": {"started": 1}, "children": [ {"parent_id": "11", "trace_id": "112", "info": {"started": 2}, "children": []}, {"parent_id": "11", "trace_id": "113", "info": {"started": 3}, "children": []}, {"parent_id": "11", "trace_id": "114", "info": {"started": 5}, "children": []} ] } ] }, { "parent_id": "0", "trace_id": "2", "info": {"started": 1}, "children": [ {"parent_id": "2", "trace_id": "21", "info": {"started": 6}, "children": []}, {"parent_id": "2", "trace_id": "22", "info": {"started": 7}, "children": []} ] } ] self.assertEqual( expected_output, base.get_driver("d://")._build_tree(test_input)) osprofiler-1.15.2/osprofiler/tests/unit/drivers/__init__.py0000666000175100017510000000000013241117762024100 0ustar zuulzuul00000000000000osprofiler-1.15.2/osprofiler/tests/unit/drivers/test_ceilometer.py0000666000175100017510000003665613241120010025536 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
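# CeilometerParserTestCase verifies the same _build_tree() behaviour as the
# base driver and checks that get_report() maps Ceilometer event traits
# (base_id, trace_id, parent_id, name, timestamp, ...) back into the nested
# report structure together with per-name "stats" counters and durations.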
import mock from osprofiler.drivers.ceilometer import Ceilometer from osprofiler.tests import test class CeilometerParserTestCase(test.TestCase): def setUp(self): super(CeilometerParserTestCase, self).setUp() self.ceilometer = Ceilometer("ceilometer://", ceilometer_api_version="2") def test_build_empty_tree(self): self.assertEqual([], self.ceilometer._build_tree({})) def test_build_complex_tree(self): test_input = { "2": {"parent_id": "0", "trace_id": "2", "info": {"started": 1}}, "1": {"parent_id": "0", "trace_id": "1", "info": {"started": 0}}, "21": {"parent_id": "2", "trace_id": "21", "info": {"started": 6}}, "22": {"parent_id": "2", "trace_id": "22", "info": {"started": 7}}, "11": {"parent_id": "1", "trace_id": "11", "info": {"started": 1}}, "113": {"parent_id": "11", "trace_id": "113", "info": {"started": 3}}, "112": {"parent_id": "11", "trace_id": "112", "info": {"started": 2}}, "114": {"parent_id": "11", "trace_id": "114", "info": {"started": 5}} } expected_output = [ { "parent_id": "0", "trace_id": "1", "info": {"started": 0}, "children": [ { "parent_id": "1", "trace_id": "11", "info": {"started": 1}, "children": [ {"parent_id": "11", "trace_id": "112", "info": {"started": 2}, "children": []}, {"parent_id": "11", "trace_id": "113", "info": {"started": 3}, "children": []}, {"parent_id": "11", "trace_id": "114", "info": {"started": 5}, "children": []} ] } ] }, { "parent_id": "0", "trace_id": "2", "info": {"started": 1}, "children": [ {"parent_id": "2", "trace_id": "21", "info": {"started": 6}, "children": []}, {"parent_id": "2", "trace_id": "22", "info": {"started": 7}, "children": []} ] } ] result = self.ceilometer._build_tree(test_input) self.assertEqual(expected_output, result) def test_get_report_empty(self): self.ceilometer.client = mock.MagicMock() self.ceilometer.client.events.list.return_value = [] expected = { "info": { "name": "total", "started": 0, "finished": None, "last_trace_started": None }, "children": [], "stats": {}, } base_id = "10" self.assertEqual(expected, self.ceilometer.get_report(base_id)) def test_get_report(self): self.ceilometer.client = mock.MagicMock() results = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock(), mock.MagicMock(), mock.MagicMock()] self.ceilometer.client.events.list.return_value = results results[0].to_dict.return_value = { "traits": [ { "type": "string", "name": "base_id", "value": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4" }, { "type": "string", "name": "host", "value": "ubuntu" }, { "type": "string", "name": "method", "value": "POST" }, { "type": "string", "name": "name", "value": "wsgi-start" }, { "type": "string", "name": "parent_id", "value": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4" }, { "type": "string", "name": "project", "value": "keystone" }, { "type": "string", "name": "service", "value": "main" }, { "type": "string", "name": "timestamp", "value": "2015-12-23T14:02:22.338776" }, { "type": "string", "name": "trace_id", "value": "06320327-2c2c-45ae-923a-515de890276a" } ], "raw": {}, "generated": "2015-12-23T10:41:38.415793", "event_type": "profiler.main", "message_id": "65fc1553-3082-4a6f-9d1e-0e3183f57a47"} results[1].to_dict.return_value = { "traits": [ { "type": "string", "name": "base_id", "value": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4" }, { "type": "string", "name": "host", "value": "ubuntu" }, { "type": "string", "name": "name", "value": "wsgi-stop" }, { "type": "string", "name": "parent_id", "value": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4" }, { "type": "string", "name": "project", "value": "keystone" }, { "type": "string", 
"name": "service", "value": "main" }, { "type": "string", "name": "timestamp", "value": "2015-12-23T14:02:22.380405" }, { "type": "string", "name": "trace_id", "value": "016c97fd-87f3-40b2-9b55-e431156b694b" } ], "raw": {}, "generated": "2015-12-23T10:41:38.406052", "event_type": "profiler.main", "message_id": "3256d9f1-48ba-4ac5-a50b-64fa42c6e264"} results[2].to_dict.return_value = { "traits": [ { "type": "string", "name": "base_id", "value": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4" }, { "type": "string", "name": "db.params", "value": "[]" }, { "type": "string", "name": "db.statement", "value": "SELECT 1" }, { "type": "string", "name": "host", "value": "ubuntu" }, { "type": "string", "name": "name", "value": "db-start" }, { "type": "string", "name": "parent_id", "value": "06320327-2c2c-45ae-923a-515de890276a" }, { "type": "string", "name": "project", "value": "keystone" }, { "type": "string", "name": "service", "value": "main" }, { "type": "string", "name": "timestamp", "value": "2015-12-23T14:02:22.395365" }, { "type": "string", "name": "trace_id", "value": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a" } ], "raw": {}, "generated": "2015-12-23T10:41:38.984161", "event_type": "profiler.main", "message_id": "60368aa4-16f0-4f37-a8fb-89e92fdf36ff"} results[3].to_dict.return_value = { "traits": [ { "type": "string", "name": "base_id", "value": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4" }, { "type": "string", "name": "host", "value": "ubuntu" }, { "type": "string", "name": "name", "value": "db-stop" }, { "type": "string", "name": "parent_id", "value": "06320327-2c2c-45ae-923a-515de890276a" }, { "type": "string", "name": "project", "value": "keystone" }, { "type": "string", "name": "service", "value": "main" }, { "type": "string", "name": "timestamp", "value": "2015-12-23T14:02:22.415486" }, { "type": "string", "name": "trace_id", "value": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a" } ], "raw": {}, "generated": "2015-12-23T10:41:39.019378", "event_type": "profiler.main", "message_id": "3fbeb339-55c5-4f28-88e4-15bee251dd3d"} results[4].to_dict.return_value = { "traits": [ { "type": "string", "name": "base_id", "value": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4" }, { "type": "string", "name": "host", "value": "ubuntu" }, { "type": "string", "name": "method", "value": "GET" }, { "type": "string", "name": "name", "value": "wsgi-start" }, { "type": "string", "name": "parent_id", "value": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4" }, { "type": "string", "name": "project", "value": "keystone" }, { "type": "string", "name": "service", "value": "main" }, { "type": "string", "name": "timestamp", "value": "2015-12-23T14:02:22.427444" }, { "type": "string", "name": "trace_id", "value": "016c97fd-87f3-40b2-9b55-e431156b694b" } ], "raw": {}, "generated": "2015-12-23T10:41:38.360409", "event_type": "profiler.main", "message_id": "57b971a9-572f-4f29-9838-3ed2564c6b5b"} expected = {"children": [ {"children": [{"children": [], "info": {"finished": 76, "host": "ubuntu", "meta.raw_payload.db-start": {}, "meta.raw_payload.db-stop": {}, "name": "db", "project": "keystone", "service": "main", "started": 56, "exception": "None"}, "parent_id": "06320327-2c2c-45ae-923a-515de890276a", "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a"} ], "info": {"finished": 0, "host": "ubuntu", "meta.raw_payload.wsgi-start": {}, "name": "wsgi", "project": "keystone", "service": "main", "started": 0}, "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "trace_id": "06320327-2c2c-45ae-923a-515de890276a"}, {"children": [], "info": {"finished": 41, "host": 
"ubuntu", "meta.raw_payload.wsgi-start": {}, "meta.raw_payload.wsgi-stop": {}, "name": "wsgi", "project": "keystone", "service": "main", "started": 88, "exception": "None"}, "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "trace_id": "016c97fd-87f3-40b2-9b55-e431156b694b"}], "info": { "finished": 88, "name": "total", "started": 0, "last_trace_started": 88 }, "stats": {"db": {"count": 1, "duration": 20}, "wsgi": {"count": 2, "duration": -47}}, } base_id = "10" result = self.ceilometer.get_report(base_id) expected_filter = [{"field": "base_id", "op": "eq", "value": base_id}] self.ceilometer.client.events.list.assert_called_once_with( expected_filter, limit=100000) self.assertEqual(expected, result) osprofiler-1.15.2/osprofiler/tests/unit/drivers/test_messaging.py0000666000175100017510000000465213241117762025376 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from osprofiler.drivers import base from osprofiler.tests import test class MessagingTestCase(test.TestCase): @mock.patch("oslo_utils.importutils.try_import") def test_init_no_oslo_messaging(self, try_import_mock): try_import_mock.return_value = None self.assertRaises( ValueError, base.get_driver, "messaging://", project="project", service="service", host="host", context={}) @mock.patch("oslo_utils.importutils.try_import") def test_init_and_notify(self, try_import_mock): context = "context" transport = "transport" project = "project" service = "service" host = "host" # emulate dynamic load of oslo.messaging library oslo_messaging_mock = mock.Mock() try_import_mock.return_value = oslo_messaging_mock # mock oslo.messaging APIs notifier_mock = mock.Mock() oslo_messaging_mock.Notifier.return_value = notifier_mock oslo_messaging_mock.get_notification_transport.return_value = transport notify_func = base.get_driver( "messaging://", project=project, service=service, context=context, host=host).notify oslo_messaging_mock.Notifier.assert_called_once_with( transport, publisher_id=host, driver="messaging", topics=["profiler"], retry=0) info = { "a": 10, "project": project, "service": service, "host": host } notify_func(info) notifier_mock.info.assert_called_once_with( context, "profiler.service", info) notifier_mock.reset_mock() notify_func(info, context="my_context") notifier_mock.info.assert_called_once_with( "my_context", "profiler.service", info) osprofiler-1.15.2/osprofiler/tests/unit/drivers/test_redis_driver.py0000666000175100017510000003326413241117762026103 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis Inc. # Copyright 2016 IBM Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from oslo_serialization import jsonutils from osprofiler.drivers.redis_driver import Redis from osprofiler.tests import test class RedisParserTestCase(test.TestCase): def setUp(self): super(RedisParserTestCase, self).setUp() self.redisdb = Redis("redis://localhost:6379") def test_build_empty_tree(self): self.assertEqual([], self.redisdb._build_tree({})) def test_build_complex_tree(self): test_input = { "2": {"parent_id": "0", "trace_id": "2", "info": {"started": 1}}, "1": {"parent_id": "0", "trace_id": "1", "info": {"started": 0}}, "21": {"parent_id": "2", "trace_id": "21", "info": {"started": 6}}, "22": {"parent_id": "2", "trace_id": "22", "info": {"started": 7}}, "11": {"parent_id": "1", "trace_id": "11", "info": {"started": 1}}, "113": {"parent_id": "11", "trace_id": "113", "info": {"started": 3}}, "112": {"parent_id": "11", "trace_id": "112", "info": {"started": 2}}, "114": {"parent_id": "11", "trace_id": "114", "info": {"started": 5}} } expected_output = [ { "parent_id": "0", "trace_id": "1", "info": {"started": 0}, "children": [ { "parent_id": "1", "trace_id": "11", "info": {"started": 1}, "children": [ {"parent_id": "11", "trace_id": "112", "info": {"started": 2}, "children": []}, {"parent_id": "11", "trace_id": "113", "info": {"started": 3}, "children": []}, {"parent_id": "11", "trace_id": "114", "info": {"started": 5}, "children": []} ] } ] }, { "parent_id": "0", "trace_id": "2", "info": {"started": 1}, "children": [ {"parent_id": "2", "trace_id": "21", "info": {"started": 6}, "children": []}, {"parent_id": "2", "trace_id": "22", "info": {"started": 7}, "children": []} ] } ] result = self.redisdb._build_tree(test_input) self.assertEqual(expected_output, result) def test_get_report_empty(self): self.redisdb.db = mock.MagicMock() self.redisdb.db.scan_iter.return_value = [] expected = { "info": { "name": "total", "started": 0, "finished": None, "last_trace_started": None }, "children": [], "stats": {}, } base_id = "10" self.assertEqual(expected, self.redisdb.get_report(base_id)) def test_get_report(self): self.redisdb.db = mock.MagicMock() result_elements = [ { "info": { "project": None, "host": "ubuntu", "request": { "path": "/v2/a322b5049d224a90bf8786c644409400/volumes", "scheme": "http", "method": "POST", "query": "" }, "service": None }, "name": "wsgi-start", "service": "main", "timestamp": "2015-12-23T14:02:22.338776", "trace_id": "06320327-2c2c-45ae-923a-515de890276a", "project": "keystone", "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4" }, { "info": { "project": None, "host": "ubuntu", "service": None }, "name": "wsgi-stop", "service": "main", "timestamp": "2015-12-23T14:02:22.380405", "trace_id": "839ca3f1-afcb-45be-a4a1-679124c552bf", "project": "keystone", "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4" }, { "info": { "project": None, "host": "ubuntu", "db": { "params": { }, "statement": "SELECT 1" }, "service": None }, "name": "db-start", "service": "main", "timestamp": "2015-12-23T14:02:22.395365", "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a", 
"project": "keystone", "parent_id": "06320327-2c2c-45ae-923a-515de890276a", "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4" }, { "info": { "project": None, "host": "ubuntu", "service": None }, "name": "db-stop", "service": "main", "timestamp": "2015-12-23T14:02:22.415486", "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a", "project": "keystone", "parent_id": "06320327-2c2c-45ae-923a-515de890276a", "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4" }, { "info": { "project": None, "host": "ubuntu", "request": { "path": "/v2/a322b5049d224a90bf8786c644409400/volumes", "scheme": "http", "method": "GET", "query": "" }, "service": None }, "name": "wsgi-start", "service": "main", "timestamp": "2015-12-23T14:02:22.427444", "trace_id": "016c97fd-87f3-40b2-9b55-e431156b694b", "project": "keystone", "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4" }] results = {result["base_id"] + "_" + result["trace_id"] + "_" + result["timestamp"]: result for result in result_elements} expected = {"children": [{"children": [{ "children": [], "info": {"finished": 76, "host": "ubuntu", "meta.raw_payload.db-start": { "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "info": {"db": {"params": {}, "statement": "SELECT 1"}, "host": "ubuntu", "project": None, "service": None}, "name": "db-start", "parent_id": "06320327-2c2c-45ae-923a-515de890276a", "project": "keystone", "service": "main", "timestamp": "2015-12-23T14:02:22.395365", "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a"}, "meta.raw_payload.db-stop": { "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "info": {"host": "ubuntu", "project": None, "service": None}, "name": "db-stop", "parent_id": "06320327-2c2c-45ae-923a-515de890276a", "project": "keystone", "service": "main", "timestamp": "2015-12-23T14:02:22.415486", "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a"}, "name": "db", "project": "keystone", "service": "main", "started": 56, "exception": "None"}, "parent_id": "06320327-2c2c-45ae-923a-515de890276a", "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a"}], "info": {"finished": 0, "host": "ubuntu", "meta.raw_payload.wsgi-start": { "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "info": {"host": "ubuntu", "project": None, "request": {"method": "POST", "path": "/v2/a322b5049d224a90bf8" "786c644409400/volumes", "query": "", "scheme": "http"}, "service": None}, "name": "wsgi-start", "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "project": "keystone", "service": "main", "timestamp": "2015-12-23T14:02:22.338776", "trace_id": "06320327-2c2c-45ae-923a-515de890276a"}, "name": "wsgi", "project": "keystone", "service": "main", "started": 0}, "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "trace_id": "06320327-2c2c-45ae-923a-515de890276a"}, {"children": [], "info": {"finished": 41, "host": "ubuntu", "meta.raw_payload.wsgi-stop": { "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "info": {"host": "ubuntu", "project": None, "service": None}, "name": "wsgi-stop", "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "project": "keystone", "service": "main", "timestamp": "2015-12-23T14:02:22.380405", "trace_id": "839ca3f1-afcb-45be-a4a1-679124c552bf"}, "name": "wsgi", "project": "keystone", "service": "main", "started": 41, "exception": "None"}, "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "trace_id": "839ca3f1-afcb-45be-a4a1-679124c552bf"}, {"children": [], "info": {"finished": 88, "host": "ubuntu", "meta.raw_payload.wsgi-start": { "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", 
"info": {"host": "ubuntu", "project": None, "request": {"method": "GET", "path": "/v2/a322b5049d224a90bf" "8786c644409400/volumes", "query": "", "scheme": "http"}, "service": None}, "name": "wsgi-start", "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "project": "keystone", "service": "main", "timestamp": "2015-12-23T14:02:22.427444", "trace_id": "016c97fd-87f3-40b2-9b55-e431156b694b"}, "name": "wsgi", "project": "keystone", "service": "main", "started": 88}, "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "trace_id": "016c97fd-87f3-40b2-9b55-e431156b694b"}], "info": { "finished": 88, "name": "total", "started": 0, "last_trace_started": 88 }, "stats": {"db": {"count": 1, "duration": 20}, "wsgi": {"count": 3, "duration": 0}}} self.redisdb.db.scan_iter.return_value = list(results.keys()) def side_effect(*args, **kwargs): return jsonutils.dumps(results[args[0]]) self.redisdb.db.get.side_effect = side_effect base_id = "10" result = self.redisdb.get_report(base_id) expected_filter = self.redisdb.namespace + "10*" self.redisdb.db.scan_iter.assert_called_once_with( match=expected_filter) self.assertEqual(expected, result) osprofiler-1.15.2/osprofiler/tests/unit/drivers/test_elasticsearch.py0000666000175100017510000001044713241117762026232 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from osprofiler.drivers.elasticsearch_driver import ElasticsearchDriver from osprofiler.tests import test class ElasticsearchTestCase(test.TestCase): def setUp(self): super(ElasticsearchTestCase, self).setUp() self.elasticsearch = ElasticsearchDriver("elasticsearch://localhost") self.elasticsearch.project = "project" self.elasticsearch.service = "service" def test_init_and_notify(self): self.elasticsearch.client = mock.MagicMock() self.elasticsearch.client.reset_mock() project = "project" service = "service" host = "host" info = { "a": 10, "project": project, "service": service, "host": host } self.elasticsearch.notify(info) self.elasticsearch.client\ .index.assert_called_once_with(index="osprofiler-notifications", doc_type="notification", body=info) def test_get_empty_report(self): self.elasticsearch.client = mock.MagicMock() self.elasticsearch.client.search = mock\ .MagicMock(return_value={"_scroll_id": "1", "hits": {"hits": []}}) self.elasticsearch.client.reset_mock() get_report = self.elasticsearch.get_report base_id = "abacaba" get_report(base_id) self.elasticsearch.client\ .search.assert_called_once_with(index="osprofiler-notifications", doc_type="notification", size=10000, scroll="2m", body={"query": { "match": {"base_id": base_id}} }) def test_get_non_empty_report(self): base_id = "1" elasticsearch_first_response = { "_scroll_id": "1", "hits": { "hits": [ { "_source": { "timestamp": "2016-08-10T16:58:03.064438", "base_id": base_id, "project": "project", "service": "service", "parent_id": "0", "name": "test", "info": { "host": "host" }, "trace_id": "1" } } ]}} elasticsearch_second_response = { "_scroll_id": base_id, "hits": {"hits": []}} self.elasticsearch.client = mock.MagicMock() self.elasticsearch.client.search = \ mock.MagicMock(return_value=elasticsearch_first_response) self.elasticsearch.client.scroll = \ mock.MagicMock(return_value=elasticsearch_second_response) self.elasticsearch.client.reset_mock() self.elasticsearch.get_report(base_id) self.elasticsearch.client\ .search.assert_called_once_with(index="osprofiler-notifications", doc_type="notification", size=10000, scroll="2m", body={"query": { "match": {"base_id": base_id}} }) self.elasticsearch.client\ .scroll.assert_called_once_with(scroll_id=base_id, scroll="2m") osprofiler-1.15.2/osprofiler/tests/unit/drivers/test_mongodb.py0000666000175100017510000003243713241117762025050 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
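# The MongoDB driver tests below run against a mocked database handle
# (self.mongodb.db):
#
#   * test_build_empty_tree / test_build_complex_tree: _build_tree() turns a
#     flat {trace_id: trace_point} mapping into a forest ordered by the
#     "started" value, using the parent_id links.
#   * test_get_report_empty / test_get_report: get_report() queries
#     db.profiler.find({"base_id": ...}, {"_id": 0}) and folds the matching
#     *-start/*-stop notifications into a report tree with per-name "stats"
#     (count and duration).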
import mock from osprofiler.drivers.mongodb import MongoDB from osprofiler.tests import test class MongoDBParserTestCase(test.TestCase): def setUp(self): super(MongoDBParserTestCase, self).setUp() self.mongodb = MongoDB("mongodb://localhost") def test_build_empty_tree(self): self.assertEqual([], self.mongodb._build_tree({})) def test_build_complex_tree(self): test_input = { "2": {"parent_id": "0", "trace_id": "2", "info": {"started": 1}}, "1": {"parent_id": "0", "trace_id": "1", "info": {"started": 0}}, "21": {"parent_id": "2", "trace_id": "21", "info": {"started": 6}}, "22": {"parent_id": "2", "trace_id": "22", "info": {"started": 7}}, "11": {"parent_id": "1", "trace_id": "11", "info": {"started": 1}}, "113": {"parent_id": "11", "trace_id": "113", "info": {"started": 3}}, "112": {"parent_id": "11", "trace_id": "112", "info": {"started": 2}}, "114": {"parent_id": "11", "trace_id": "114", "info": {"started": 5}} } expected_output = [ { "parent_id": "0", "trace_id": "1", "info": {"started": 0}, "children": [ { "parent_id": "1", "trace_id": "11", "info": {"started": 1}, "children": [ {"parent_id": "11", "trace_id": "112", "info": {"started": 2}, "children": []}, {"parent_id": "11", "trace_id": "113", "info": {"started": 3}, "children": []}, {"parent_id": "11", "trace_id": "114", "info": {"started": 5}, "children": []} ] } ] }, { "parent_id": "0", "trace_id": "2", "info": {"started": 1}, "children": [ {"parent_id": "2", "trace_id": "21", "info": {"started": 6}, "children": []}, {"parent_id": "2", "trace_id": "22", "info": {"started": 7}, "children": []} ] } ] result = self.mongodb._build_tree(test_input) self.assertEqual(expected_output, result) def test_get_report_empty(self): self.mongodb.db = mock.MagicMock() self.mongodb.db.profiler.find.return_value = [] expected = { "info": { "name": "total", "started": 0, "finished": None, "last_trace_started": None }, "children": [], "stats": {}, } base_id = "10" self.assertEqual(expected, self.mongodb.get_report(base_id)) def test_get_report(self): self.mongodb.db = mock.MagicMock() results = [ { "info": { "project": None, "host": "ubuntu", "request": { "path": "/v2/a322b5049d224a90bf8786c644409400/volumes", "scheme": "http", "method": "POST", "query": "" }, "service": None }, "name": "wsgi-start", "service": "main", "timestamp": "2015-12-23T14:02:22.338776", "trace_id": "06320327-2c2c-45ae-923a-515de890276a", "project": "keystone", "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4" }, { "info": { "project": None, "host": "ubuntu", "service": None }, "name": "wsgi-stop", "service": "main", "timestamp": "2015-12-23T14:02:22.380405", "trace_id": "839ca3f1-afcb-45be-a4a1-679124c552bf", "project": "keystone", "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4" }, { "info": { "project": None, "host": "ubuntu", "db": { "params": { }, "statement": "SELECT 1" }, "service": None }, "name": "db-start", "service": "main", "timestamp": "2015-12-23T14:02:22.395365", "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a", "project": "keystone", "parent_id": "06320327-2c2c-45ae-923a-515de890276a", "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4" }, { "info": { "project": None, "host": "ubuntu", "service": None }, "name": "db-stop", "service": "main", "timestamp": "2015-12-23T14:02:22.415486", "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a", "project": "keystone", "parent_id": "06320327-2c2c-45ae-923a-515de890276a", "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4" 
}, { "info": { "project": None, "host": "ubuntu", "request": { "path": "/v2/a322b5049d224a90bf8786c644409400/volumes", "scheme": "http", "method": "GET", "query": "" }, "service": None }, "name": "wsgi-start", "service": "main", "timestamp": "2015-12-23T14:02:22.427444", "trace_id": "016c97fd-87f3-40b2-9b55-e431156b694b", "project": "keystone", "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4" }] expected = {"children": [{"children": [{ "children": [], "info": {"finished": 76, "host": "ubuntu", "meta.raw_payload.db-start": { "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "info": {"db": {"params": {}, "statement": "SELECT 1"}, "host": "ubuntu", "project": None, "service": None}, "name": "db-start", "parent_id": "06320327-2c2c-45ae-923a-515de890276a", "project": "keystone", "service": "main", "timestamp": "2015-12-23T14:02:22.395365", "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a"}, "meta.raw_payload.db-stop": { "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "info": {"host": "ubuntu", "project": None, "service": None}, "name": "db-stop", "parent_id": "06320327-2c2c-45ae-923a-515de890276a", "project": "keystone", "service": "main", "timestamp": "2015-12-23T14:02:22.415486", "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a"}, "name": "db", "project": "keystone", "service": "main", "started": 56, "exception": "None"}, "parent_id": "06320327-2c2c-45ae-923a-515de890276a", "trace_id": "1baf1d24-9ca9-4f4c-bd3f-01b7e0c0735a"}], "info": {"finished": 0, "host": "ubuntu", "meta.raw_payload.wsgi-start": { "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "info": {"host": "ubuntu", "project": None, "request": {"method": "POST", "path": "/v2/a322b5049d224a90bf8" "786c644409400/volumes", "query": "", "scheme": "http"}, "service": None}, "name": "wsgi-start", "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "project": "keystone", "service": "main", "timestamp": "2015-12-23T14:02:22.338776", "trace_id": "06320327-2c2c-45ae-923a-515de890276a"}, "name": "wsgi", "project": "keystone", "service": "main", "started": 0}, "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "trace_id": "06320327-2c2c-45ae-923a-515de890276a"}, {"children": [], "info": {"finished": 41, "host": "ubuntu", "meta.raw_payload.wsgi-stop": { "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "info": {"host": "ubuntu", "project": None, "service": None}, "name": "wsgi-stop", "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "project": "keystone", "service": "main", "timestamp": "2015-12-23T14:02:22.380405", "trace_id": "839ca3f1-afcb-45be-a4a1-679124c552bf"}, "name": "wsgi", "project": "keystone", "service": "main", "started": 41, "exception": "None"}, "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "trace_id": "839ca3f1-afcb-45be-a4a1-679124c552bf"}, {"children": [], "info": {"finished": 88, "host": "ubuntu", "meta.raw_payload.wsgi-start": { "base_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "info": {"host": "ubuntu", "project": None, "request": {"method": "GET", "path": "/v2/a322b5049d224a90bf" "8786c644409400/volumes", "query": "", "scheme": "http"}, "service": None}, "name": "wsgi-start", "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "project": "keystone", "service": "main", "timestamp": "2015-12-23T14:02:22.427444", "trace_id": "016c97fd-87f3-40b2-9b55-e431156b694b"}, "name": "wsgi", "project": "keystone", "service": "main", "started": 88}, "parent_id": "7253ca8c-33b3-4f84-b4f1-f5a4311ddfa4", "trace_id": "016c97fd-87f3-40b2-9b55-e431156b694b"}], "info": 
{ "finished": 88, "name": "total", "started": 0, "last_trace_started": 88 }, "stats": {"db": {"count": 1, "duration": 20}, "wsgi": {"count": 3, "duration": 0}}} self.mongodb.db.profiler.find.return_value = results base_id = "10" result = self.mongodb.get_report(base_id) expected_filter = [{"base_id": base_id}, {"_id": 0}] self.mongodb.db.profiler.find.assert_called_once_with( *expected_filter) self.assertEqual(expected, result) osprofiler-1.15.2/osprofiler/tests/unit/test_notifier.py0000666000175100017510000000303613241117762023555 0ustar zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from osprofiler import notifier from osprofiler.tests import test class NotifierTestCase(test.TestCase): def tearDown(self): notifier.set(notifier._noop_notifier) # restore defaults super(NotifierTestCase, self).tearDown() def test_set(self): def test(info): pass notifier.set(test) self.assertEqual(notifier.get(), test) def test_get_default_notifier(self): self.assertEqual(notifier.get(), notifier._noop_notifier) def test_notify(self): m = mock.MagicMock() notifier.set(m) notifier.notify(10) m.assert_called_once_with(10) @mock.patch("osprofiler.notifier.base.get_driver") def test_create(self, mock_factory): result = notifier.create("test", 10, b=20) mock_factory.assert_called_once_with("test", 10, b=20) self.assertEqual(mock_factory.return_value.notify, result) osprofiler-1.15.2/osprofiler/tests/unit/cmd/0000775000175100017510000000000013241120161021047 5ustar zuulzuul00000000000000osprofiler-1.15.2/osprofiler/tests/unit/cmd/__init__.py0000666000175100017510000000000013241117762023165 0ustar zuulzuul00000000000000osprofiler-1.15.2/osprofiler/tests/unit/cmd/test_shell.py0000666000175100017510000002035113241120010023563 0ustar zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json import os import sys import ddt import mock import six from osprofiler.cmd import shell from osprofiler import exc from osprofiler.tests import test @ddt.ddt class ShellTestCase(test.TestCase): TRACE_ID = "c598094d-bbee-40b6-b317-d76003b679d3" def setUp(self): super(ShellTestCase, self).setUp() self.old_environment = os.environ.copy() os.environ = { "OS_USERNAME": "username", "OS_USER_ID": "user_id", "OS_PASSWORD": "password", "OS_USER_DOMAIN_ID": "user_domain_id", "OS_USER_DOMAIN_NAME": "user_domain_name", "OS_PROJECT_DOMAIN_ID": "project_domain_id", "OS_PROJECT_DOMAIN_NAME": "project_domain_name", "OS_PROJECT_ID": "project_id", "OS_PROJECT_NAME": "project_name", "OS_TENANT_ID": "tenant_id", "OS_TENANT_NAME": "tenant_name", "OS_AUTH_URL": "http://127.0.0.1:5000/v3/", "OS_AUTH_TOKEN": "pass", "OS_CACERT": "/path/to/cacert", "OS_SERVICE_TYPE": "service_type", "OS_ENDPOINT_TYPE": "public", "OS_REGION_NAME": "test" } self.ceiloclient = mock.MagicMock() sys.modules["ceilometerclient"] = self.ceiloclient self.addCleanup(sys.modules.pop, "ceilometerclient", None) ceilo_modules = ["client", "shell"] for module in ceilo_modules: sys.modules["ceilometerclient.%s" % module] = getattr( self.ceiloclient, module) self.addCleanup( sys.modules.pop, "ceilometerclient.%s" % module, None) def tearDown(self): super(ShellTestCase, self).tearDown() os.environ = self.old_environment def _trace_show_cmd(self, format_=None): cmd = "trace show %s" % self.TRACE_ID return cmd if format_ is None else "%s --%s" % (cmd, format_) @mock.patch("sys.stdout", six.StringIO()) @mock.patch("osprofiler.cmd.shell.OSProfilerShell") def test_shell_main(self, mock_shell): mock_shell.side_effect = exc.CommandError("some_message") shell.main() self.assertEqual("some_message\n", sys.stdout.getvalue()) def run_command(self, cmd): shell.OSProfilerShell(cmd.split()) def _test_with_command_error(self, cmd, expected_message): try: self.run_command(cmd) except exc.CommandError as actual_error: self.assertEqual(str(actual_error), expected_message) else: raise ValueError( "Expected: `osprofiler.exc.CommandError` is raised with " "message: '%s'." % expected_message) def test_trace_show_ceilometerclient_is_missed(self): sys.modules["ceilometerclient"] = None sys.modules["ceilometerclient.client"] = None sys.modules["ceilometerclient.shell"] = None msg = ("To use this command, you should install " "'ceilometerclient' manually. Use command:\n " "'pip install python-ceilometerclient'.") self._test_with_command_error(self._trace_show_cmd(), msg) def test_trace_show_unauthorized(self): class FakeHTTPUnauthorized(Exception): http_status = 401 self.ceiloclient.client.get_client.side_effect = FakeHTTPUnauthorized msg = "Invalid OpenStack Identity credentials." self._test_with_command_error(self._trace_show_cmd(), msg) def test_trace_show_unknown_error(self): self.ceiloclient.client.get_client.side_effect = Exception("test") msg = "Error occurred while connecting to Ceilometer: test." 
self._test_with_command_error(self._trace_show_cmd(), msg) @mock.patch("osprofiler.drivers.ceilometer.Ceilometer.get_report") def test_trace_show_no_selected_format(self, mock_get): mock_get.return_value = self._create_mock_notifications() msg = ("You should choose one of the following output formats: " "json, html or dot.") self._test_with_command_error(self._trace_show_cmd(), msg) @mock.patch("osprofiler.drivers.ceilometer.Ceilometer.get_report") @ddt.data(None, {"info": {"started": 0, "finished": 1, "name": "total"}, "children": []}) def test_trace_show_trace_id_not_found(self, notifications, mock_get): mock_get.return_value = notifications msg = ("Trace with UUID %s not found. Please check the HMAC key " "used in the command." % self.TRACE_ID) self._test_with_command_error(self._trace_show_cmd(), msg) def _create_mock_notifications(self): notifications = { "info": { "started": 0, "finished": 1, "name": "total" }, "children": [{ "info": { "started": 0, "finished": 1, "name": "total" }, "children": [] }] } return notifications @mock.patch("sys.stdout", six.StringIO()) @mock.patch("osprofiler.drivers.ceilometer.Ceilometer.get_report") def test_trace_show_in_json(self, mock_get): notifications = self._create_mock_notifications() mock_get.return_value = notifications self.run_command(self._trace_show_cmd(format_="json")) self.assertEqual("%s\n" % json.dumps(notifications, indent=2, separators=(",", ": "),), sys.stdout.getvalue()) @mock.patch("sys.stdout", six.StringIO()) @mock.patch("osprofiler.drivers.ceilometer.Ceilometer.get_report") def test_trace_show_in_html(self, mock_get): notifications = self._create_mock_notifications() mock_get.return_value = notifications # NOTE(akurilin): to simplify assert statement, html-template should be # replaced. html_template = ( "A long time ago in a galaxy far, far away..." " some_data = $DATA" "It is a period of civil war. Rebel" "spaceships, striking from a hidden" "base, have won their first victory" "against the evil Galactic Empire.") with mock.patch("osprofiler.cmd.commands.open", mock.mock_open(read_data=html_template), create=True): self.run_command(self._trace_show_cmd(format_="html")) self.assertEqual("A long time ago in a galaxy far, far away..." " some_data = %s" "It is a period of civil war. Rebel" "spaceships, striking from a hidden" "base, have won their first victory" "against the evil Galactic Empire." "\n" % json.dumps(notifications, indent=4, separators=(",", ": ")), sys.stdout.getvalue()) @mock.patch("sys.stdout", six.StringIO()) @mock.patch("osprofiler.drivers.ceilometer.Ceilometer.get_report") def test_trace_show_write_to_file(self, mock_get): notifications = self._create_mock_notifications() mock_get.return_value = notifications with mock.patch("osprofiler.cmd.commands.open", mock.mock_open(), create=True) as mock_open: self.run_command("%s --out='/file'" % self._trace_show_cmd(format_="json")) output = mock_open.return_value.__enter__.return_value output.write.assert_called_once_with( json.dumps(notifications, indent=2, separators=(",", ": "))) osprofiler-1.15.2/osprofiler/tests/unit/test_sqlalchemy.py0000666000175100017510000001455013241117762024103 0ustar zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import mock from osprofiler import sqlalchemy from osprofiler.tests import test class SqlalchemyTracingTestCase(test.TestCase): @mock.patch("osprofiler.sqlalchemy.profiler") def test_before_execute(self, mock_profiler): handler = sqlalchemy._before_cursor_execute("sql") handler(mock.MagicMock(), 1, 2, 3, 4, 5) expected_info = {"db": {"statement": 2, "params": 3}} mock_profiler.start.assert_called_once_with("sql", info=expected_info) @mock.patch("osprofiler.sqlalchemy.profiler") def test_after_execute(self, mock_profiler): handler = sqlalchemy._after_cursor_execute() handler(mock.MagicMock(), 1, 2, 3, 4, 5) mock_profiler.stop.assert_called_once_with() @mock.patch("osprofiler.sqlalchemy.profiler") def test_after_execute_with_sql_result(self, mock_profiler): handler = sqlalchemy._after_cursor_execute(hide_result=False) cursor = mock.MagicMock() cursor._rows = (1,) handler(1, cursor, 2, 3, 4, 5) info = { "db": { "result": str(cursor._rows) } } mock_profiler.stop.assert_called_once_with(info=info) @mock.patch("osprofiler.sqlalchemy.profiler") def test_handle_error(self, mock_profiler): original_exception = Exception("error") chained_exception = Exception("error and the reason") sqlalchemy_exception_ctx = mock.MagicMock() sqlalchemy_exception_ctx.original_exception = original_exception sqlalchemy_exception_ctx.chained_exception = chained_exception sqlalchemy.handle_error(sqlalchemy_exception_ctx) expected_info = { "etype": "Exception", "db": { "original_exception": str(original_exception), "chained_exception": str(chained_exception), } } mock_profiler.stop.assert_called_once_with(info=expected_info) @mock.patch("osprofiler.sqlalchemy.handle_error") @mock.patch("osprofiler.sqlalchemy._before_cursor_execute") @mock.patch("osprofiler.sqlalchemy._after_cursor_execute") def test_add_tracing(self, mock_after_exc, mock_before_exc, mock_handle_error): sa = mock.MagicMock() engine = mock.MagicMock() mock_before_exc.return_value = "before" mock_after_exc.return_value = "after" sqlalchemy.add_tracing(sa, engine, "sql") mock_before_exc.assert_called_once_with("sql") # Default set hide_result=True mock_after_exc.assert_called_once_with(hide_result=True) expected_calls = [ mock.call(engine, "before_cursor_execute", "before"), mock.call(engine, "after_cursor_execute", "after"), mock.call(engine, "handle_error", mock_handle_error), ] self.assertEqual(sa.event.listen.call_args_list, expected_calls) @mock.patch("osprofiler.sqlalchemy.handle_error") @mock.patch("osprofiler.sqlalchemy._before_cursor_execute") @mock.patch("osprofiler.sqlalchemy._after_cursor_execute") def test_wrap_session(self, mock_after_exc, mock_before_exc, mock_handle_error): sa = mock.MagicMock() @contextlib.contextmanager def _session(): session = mock.MagicMock() # current engine object stored within the session session.bind = mock.MagicMock() session.bind.traced = None yield session mock_before_exc.return_value = "before" mock_after_exc.return_value = "after" session = sqlalchemy.wrap_session(sa, _session()) with session as sess: pass mock_before_exc.assert_called_once_with("db") # Default set hide_result=True 
mock_after_exc.assert_called_once_with(hide_result=True) expected_calls = [ mock.call(sess.bind, "before_cursor_execute", "before"), mock.call(sess.bind, "after_cursor_execute", "after"), mock.call(sess.bind, "handle_error", mock_handle_error), ] self.assertEqual(sa.event.listen.call_args_list, expected_calls) @mock.patch("osprofiler.sqlalchemy.handle_error") @mock.patch("osprofiler.sqlalchemy._before_cursor_execute") @mock.patch("osprofiler.sqlalchemy._after_cursor_execute") @mock.patch("osprofiler.profiler") def test_with_sql_result(self, mock_profiler, mock_after_exc, mock_before_exc, mock_handle_error): sa = mock.MagicMock() engine = mock.MagicMock() mock_before_exc.return_value = "before" mock_after_exc.return_value = "after" sqlalchemy.add_tracing(sa, engine, "sql", hide_result=False) mock_before_exc.assert_called_once_with("sql") # Default set hide_result=True mock_after_exc.assert_called_once_with(hide_result=False) expected_calls = [ mock.call(engine, "before_cursor_execute", "before"), mock.call(engine, "after_cursor_execute", "after"), mock.call(engine, "handle_error", mock_handle_error), ] self.assertEqual(sa.event.listen.call_args_list, expected_calls) @mock.patch("osprofiler.sqlalchemy._before_cursor_execute") @mock.patch("osprofiler.sqlalchemy._after_cursor_execute") def test_disable_and_enable(self, mock_after_exc, mock_before_exc): sqlalchemy.disable() sa = mock.MagicMock() engine = mock.MagicMock() sqlalchemy.add_tracing(sa, engine, "sql") self.assertFalse(mock_after_exc.called) self.assertFalse(mock_before_exc.called) sqlalchemy.enable() sqlalchemy.add_tracing(sa, engine, "sql") self.assertTrue(mock_after_exc.called) self.assertTrue(mock_before_exc.called) osprofiler-1.15.2/osprofiler/tests/unit/test_opts.py0000666000175100017510000000550113241117762022722 0ustar zuulzuul00000000000000# Copyright 2016 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
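# The tests below cover osprofiler.opts, which registers the [profiler]
# configuration options:
#
#   * test_options_defaults / test_options_defaults_override: set_defaults()
#     controls the "enabled", "trace_sqlalchemy" and "hmac_keys" options and
#     the is_trace_enabled()/is_db_trace_enabled() helpers.
#   * test_web_trace_*: enable_web_trace()/disable_web_trace() only call
#     osprofiler.web.enable()/disable() when profiling is enabled in the
#     configuration.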
import mock from oslo_config import fixture from osprofiler import opts from osprofiler.tests import test class ConfigTestCase(test.TestCase): def setUp(self): super(ConfigTestCase, self).setUp() self.conf_fixture = self.useFixture(fixture.Config()) def test_options_defaults(self): opts.set_defaults(self.conf_fixture.conf) self.assertFalse(self.conf_fixture.conf.profiler.enabled) self.assertFalse(self.conf_fixture.conf.profiler.trace_sqlalchemy) self.assertEqual("SECRET_KEY", self.conf_fixture.conf.profiler.hmac_keys) self.assertFalse(opts.is_trace_enabled(self.conf_fixture.conf)) self.assertFalse(opts.is_db_trace_enabled(self.conf_fixture.conf)) def test_options_defaults_override(self): opts.set_defaults(self.conf_fixture.conf, enabled=True, trace_sqlalchemy=True, hmac_keys="MY_KEY") self.assertTrue(self.conf_fixture.conf.profiler.enabled) self.assertTrue(self.conf_fixture.conf.profiler.trace_sqlalchemy) self.assertEqual("MY_KEY", self.conf_fixture.conf.profiler.hmac_keys) self.assertTrue(opts.is_trace_enabled(self.conf_fixture.conf)) self.assertTrue(opts.is_db_trace_enabled(self.conf_fixture.conf)) @mock.patch("osprofiler.web.enable") @mock.patch("osprofiler.web.disable") def test_web_trace_disabled(self, mock_disable, mock_enable): opts.set_defaults(self.conf_fixture.conf, hmac_keys="MY_KEY") opts.enable_web_trace(self.conf_fixture.conf) opts.disable_web_trace(self.conf_fixture.conf) self.assertEqual(0, mock_enable.call_count) self.assertEqual(0, mock_disable.call_count) @mock.patch("osprofiler.web.enable") @mock.patch("osprofiler.web.disable") def test_web_trace_enabled(self, mock_disable, mock_enable): opts.set_defaults(self.conf_fixture.conf, enabled=True, hmac_keys="MY_KEY") opts.enable_web_trace(self.conf_fixture.conf) opts.disable_web_trace(self.conf_fixture.conf) mock_enable.assert_called_once_with("MY_KEY") mock_disable.assert_called_once_with() osprofiler-1.15.2/osprofiler/tests/unit/test_initializer.py0000666000175100017510000000313513241117762024261 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
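# The single test below verifies osprofiler.initializer.init_from_conf():
# it must create a notifier from the configured connection string, install
# it via notifier.set() and enable the WSGI middleware with the configured
# HMAC keys.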
import mock import testtools from osprofiler import initializer class InitializerTestCase(testtools.TestCase): @mock.patch("osprofiler.notifier.set") @mock.patch("osprofiler.notifier.create") @mock.patch("osprofiler.web.enable") def test_initializer(self, web_enable_mock, notifier_create_mock, notifier_set_mock): conf = mock.Mock() conf.profiler.connection_string = "driver://" conf.profiler.hmac_keys = "hmac_keys" context = {} project = "my-project" service = "my-service" host = "my-host" notifier_mock = mock.Mock() notifier_create_mock.return_value = notifier_mock initializer.init_from_conf(conf, context, project, service, host) notifier_create_mock.assert_called_once_with( "driver://", context=context, project=project, service=service, host=host, conf=conf) notifier_set_mock.assert_called_once_with(notifier_mock) web_enable_mock.assert_called_once_with("hmac_keys") osprofiler-1.15.2/osprofiler/tests/test.py0000666000175100017510000000176013241117762020701 0ustar zuulzuul00000000000000# Copyright 2014 Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging import sys from testtools import testcase class TestCase(testcase.TestCase): """Test case base class for all osprofiler unit tests.""" pass class FunctionalTestCase(TestCase): """Base for functional tests""" def setUp(self): super(FunctionalTestCase, self).setUp() logging.basicConfig(stream=sys.stderr, level=logging.DEBUG) osprofiler-1.15.2/setup.cfg0000666000175100017510000000204613241120161015624 0ustar zuulzuul00000000000000[metadata] name = osprofiler summary = OpenStack Profiler Library description-file = README.rst author = OpenStack author-email = openstack-dev@lists.openstack.org home-page = https://docs.openstack.org/osprofiler/latest/ classifier = Environment :: OpenStack Intended Audience :: Developers Intended Audience :: Information Technology License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: 2 Programming Language :: Python :: 2.7 Programming Language :: Python :: 3.5 [files] packages = osprofiler [global] setup-hooks = pbr.hooks.setup_hook [extras] oslo_config = oslo.config>=3.2.0 # Apache-2.0 [build_sphinx] all_files = 1 build-dir = doc/build source-dir = doc/source warning-is-error = 1 [entry_points] oslo.config.opts = osprofiler = osprofiler.opts:list_opts console_scripts = osprofiler = osprofiler.cmd.shell:main paste.filter_factory = osprofiler = osprofiler.web:WsgiMiddleware.factory [egg_info] tag_build = tag_date = 0 osprofiler-1.15.2/tools/0000775000175100017510000000000013241120161015137 5ustar zuulzuul00000000000000osprofiler-1.15.2/tools/patch_tox_venv.py0000666000175100017510000000311613241117762020560 0ustar zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import sys import install_venv_common as install_venv # noqa def first_file(file_list): for candidate in file_list: if os.path.exists(candidate): return candidate def main(argv): root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) venv = os.environ['VIRTUAL_ENV'] pip_requires = first_file([ os.path.join(root, 'requirements.txt'), os.path.join(root, 'tools', 'pip-requires'), ]) test_requires = first_file([ os.path.join(root, 'test-requirements.txt'), os.path.join(root, 'tools', 'test-requires'), ]) py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) project = 'oslo' install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, py_version, project) #NOTE(dprince): For Tox we only run post_process (which patches files, etc) install.post_process() if __name__ == '__main__': main(sys.argv) osprofiler-1.15.2/tools/lint.py0000666000175100017510000000167613241117762016510 0ustar zuulzuul00000000000000# Copyright (c) 2013 Intel Corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # @author: Zhongyue Luo, Intel Corporation. # import sys from pylint import lint ENABLED_PYLINT_MSGS = ['W0611'] def main(dirpath): enable_opt = '--enable=%s' % ','.join(ENABLED_PYLINT_MSGS) lint.Run(['--reports=n', '--disable=all', enable_opt, dirpath]) if __name__ == '__main__': main(sys.argv[1]) osprofiler-1.15.2/doc/0000775000175100017510000000000013241120161014544 5ustar zuulzuul00000000000000osprofiler-1.15.2/doc/source/0000775000175100017510000000000013241120161016044 5ustar zuulzuul00000000000000osprofiler-1.15.2/doc/source/user/0000775000175100017510000000000013241120161017022 5ustar zuulzuul00000000000000osprofiler-1.15.2/doc/source/user/collectors.rst0000666000175100017510000000253013241117762021744 0ustar zuulzuul00000000000000========== Collectors ========== There are a number of drivers to support different collector backends: Redis ----- * Overview The Redis driver allows profiling data to be collected into a redis database instance. The traces are stored as key-value pairs where the key is a string built using trace ids and timestamps and the values are JSON strings containing the trace information. A second driver is included to use Redis Sentinel in addition to single node Redis. * Capabilities * Write trace data to the database. * Query Traces in database: This allows for pulling trace data querying on the keys used to save the data in the database. * Generate a report based on the traces stored in the database. * Supports use of Redis Sentinel for robustness. 
* Usage The driver is used by OSProfiler when using a connection-string URL of the form redis://<host>:<port>. To use the Sentinel version, use a connection-string of the form redissentinel://<host>:<port> * Configuration * No config changes are required for the base Redis driver. * There are two configuration options for the Redis Sentinel driver: * socket_timeout: specifies the sentinel connection socket timeout value. Defaults to: 0.1 seconds * sentinel_service_name: The name of the Sentinel service to use. Defaults to: "mymaster" osprofiler-1.15.2/doc/source/user/history.rst0000666000175100017510000000004013241117762021266 0ustar zuulzuul00000000000000.. include:: ../../../ChangeLog osprofiler-1.15.2/doc/source/user/similar_projects.rst0000666000175100017510000000102313241117762023140 0ustar zuulzuul00000000000000================ Similar projects ================ Other projects (some alive, some abandoned, some research prototypes) that are similar (in idea and ideal) to OSprofiler. * `Zipkin`_ * `Dapper`_ * `Tomograph`_ * `HTrace`_ * `Jaeger`_ * `OpenTracing`_ .. _Zipkin: http://zipkin.io/ .. _Dapper: http://research.google.com/pubs/pub36356.html .. _Tomograph: https://github.com/stackforge/tomograph .. _HTrace: https://htrace.incubator.apache.org/ .. _Jaeger: https://uber.github.io/jaeger/ .. _OpenTracing: http://opentracing.io/ osprofiler-1.15.2/doc/source/user/integration.rst0000666000175100017510000001226113241117762022120 0ustar zuulzuul00000000000000=========== Integration =========== There are 4 topics related to integrating OSprofiler & `OpenStack`_: What we should use as a centralized collector? ---------------------------------------------- We primarily decided to use `Ceilometer`_, because: * It's already integrated in OpenStack, so it's quite simple to send notifications to it from all projects. * There is an OpenStack API in Ceilometer that allows us to retrieve all messages related to one trace. Take a look at *osprofiler.drivers.ceilometer.Ceilometer:get_report* Starting with OSProfiler 1.4.0, other options (MongoDB driver in the 1.4.0 release, Elasticsearch driver added later, etc.) are also available. How to setup profiler notifier? ------------------------------- We primarily decided to use the oslo.messaging Notifier API, because: * `oslo.messaging`_ is integrated in all projects * It's the simplest way to send notifications to Ceilometer; take a look at the *osprofiler.drivers.messaging.Messaging:notify* method * We don't need to add any new `CONF`_ options in projects Starting with OSProfiler 1.4.0, other options (MongoDB driver in the 1.4.0 release, Elasticsearch driver added later, etc.) are also available. How to initialize profiler, to get one trace across all services? ------------------------------------------------------------------ To enable cross-service profiling we need to send trace info (base_id & trace_id) from the caller to the callee, so the callee will be able to init its profiler with these values. In case of OpenStack there are 2 kinds of interaction between 2 services: * REST API It's well known that there are python clients for every project that generate proper HTTP requests and parse responses to objects.
These python clients are used in 2 cases: * User access -> OpenStack * Service from Project 1 would like to access Service from Project 2 So what we need is to: * Put trace-info headers into python client requests (if the profiler is initialized) * Add the `OSprofiler WSGI middleware`_ to your service; this initializes the profiler if and only if there are special trace headers that are signed by one of the HMAC keys from api-paste.ini (if multiple keys exist, the signing process will continue to use the key that was accepted during validation). * The common items that are used to configure the middleware are the following (these can be provided when initializing the middleware object or when setting up the api-paste.ini file):: hmac_keys = KEY1, KEY2 (can be a single key as well) Actually the algorithm is a bit more complex. The Python client will also sign the trace info with an `HMAC`_ key (let's call that key ``A``) passed to profiler.init, and on reception the WSGI middleware will check that it's signed with *one of* the HMAC keys (the wsgi server should have key ``A`` as well, but may also have keys ``B`` and ``C``) that are specified in api-paste.ini. This ensures that only a user that knows the HMAC key ``A`` in api-paste.ini can init a profiler properly and send trace info that will actually be processed; trace info that does **not** pass the HMAC validation is discarded. **NOTE:** The application of many possible *validation* keys makes it possible to roll out a key upgrade in a non-impactful manner (by adding a key into the list and rolling out that change and then removing the older key at some time in the future). * RPC API RPC calls are used for interaction between services of one project. It's well known that projects use `oslo.messaging`_ to deal with RPC. That's very convenient, because all projects deal with RPC in a similar way. So there are 2 required changes: * On the caller side, put trace info into the request context (if the profiler was initialized) * On the callee side, initialize the profiler if there is trace info in the request context. * Trace all methods of the callee API (can be done via profiler.trace_cls). What points should be tracked by default? ----------------------------------------- I think that for all projects we should include by default 5 kinds of points: * All HTTP calls - helps to get information about which HTTP requests were done, the duration of calls (latency of the service), and which projects were involved in the request. * All RPC calls - helps to understand the duration of the parts of a request related to different services in one project. This information is essential to understand which service produces the bottleneck. * All DB API calls - in some cases a slow DB query can produce the bottleneck, so it's quite useful to track how much time a request spends in the DB layer. * All driver calls - in case of nova, cinder and others we have vendor drivers. The duration of these calls shows whether a particular vendor driver is the bottleneck. * ALL SQL requests (turned off by default, because they produce a lot of traffic) .. _CONF: https://docs.openstack.org/oslo.config/latest/ .. _HMAC: https://en.wikipedia.org/wiki/Hash-based_message_authentication_code .. _OpenStack: https://www.openstack.org/ .. _Ceilometer: https://wiki.openstack.org/wiki/Ceilometer .. _oslo.messaging: https://pypi.python.org/pypi/oslo.messaging ..
_OSprofiler WSGI middleware: https://github.com/openstack/osprofiler/blob/master/osprofiler/web.py osprofiler-1.15.2/doc/source/user/api.rst0000666000175100017510000001615213241120010020325 0ustar zuulzuul00000000000000=== API === There are a few things that you should know about the API before using it. Five ways to add a new trace point. ----------------------------------- .. code-block:: python from osprofiler import profiler def some_func(): profiler.start("point_name", {"any_key": "with_any_value"}) # your code profiler.stop({"any_info_about_point": "in_this_dict"}) @profiler.trace("point_name", info={"any_info_about_point": "in_this_dict"}, hide_args=False) def some_func2(*args, **kwargs): # If you need to hide args in profile info, put hide_args=True pass def some_func3(): with profiler.Trace("point_name", info={"any_key": "with_any_value"}): # some code here @profiler.trace_cls("point_name", info={}, hide_args=False, trace_private=False) class TracedClass(object): def traced_method(self): pass def _traced_only_if_trace_private_true(self): pass @six.add_metaclass(profiler.TracedMeta) class RpcManagerClass(object): __trace_args__ = {'name': 'rpc', 'info': None, 'hide_args': False, 'trace_private': False} def my_method(self, some_args): pass def my_method2(self, some_arg1, some_arg2, kw=None, kw2=None): pass How profiler works? ------------------- * **profiler.Trace()** and **@profiler.trace()** are just syntax sugar that calls the **profiler.start()** & **profiler.stop()** methods. * Every call of **profiler.start()** & **profiler.stop()** sends 1 message to the **collector**. It means that every trace point creates 2 records in the collector. *(more about collector & records later)* * Nested trace points are supported. The sample below produces 2 trace points: .. code-block:: python profiler.start("parent_point") profiler.start("child_point") profiler.stop() profiler.stop() The implementation is quite simple. Profiler has one stack that contains ids of all trace points. E.g.: .. code-block:: python profiler.start("parent_point") # trace_stack.push(<new_uuid>) # send to collector -> trace_stack[-2:] profiler.start("child_point") # trace_stack.push(<new_uuid>) # send to collector -> trace_stack[-2:] profiler.stop() # send to collector -> trace_stack[-2:] # trace_stack.pop() profiler.stop() # send to collector -> trace_stack[-2:] # trace_stack.pop() It's simple to build a tree of nested trace points, having **(parent_id, point_id)** of all trace points. Process of sending to collector. -------------------------------- Trace points contain 2 messages (start and stop). Messages like below are sent to a collector: .. parsed-literal:: { "name": <point_name>-(start|stop) "base_id": <uuid>, "parent_id": <uuid>, "trace_id": <uuid>, "info": <dict> } The fields are defined as the following: * base_id - ``<uuid>`` that is equal for all trace points that belong to one trace; this is done to simplify the process of retrieving all trace points related to one trace from the collector * parent_id - ``<uuid>`` of the parent trace point * trace_id - ``<uuid>`` of the current trace point * info - the dictionary that contains user information passed when calling the profiler **start()** & **stop()** methods. Setting up the collector. ------------------------- Using OSProfiler notifier. ^^^^^^^^^^^^^^^^^^^^^^^^^^ .. note:: The following way of configuring OSProfiler is deprecated. The new version description is located below - `Using OSProfiler initializer.`_. Don't use the OSProfiler notifier directly! Its support will be removed soon from OSProfiler. The profiler doesn't include a trace point collector.
The user/developer should instead provide a method that sends messages to a collector. Let's take a look at a trivial sample, where the collector is just a file: .. code-block:: python import json from osprofiler import notifier def send_info_to_file_collector(info, context=None): with open("traces", "a") as f: f.write(json.dumps(info)) notifier.set(send_info_to_file_collector) So now on every **profiler.start()** and **profiler.stop()** call we will write info about the trace point to the end of the **traces** file. Using OSProfiler initializer. ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ OSProfiler now contains various storage drivers to collect tracing data. Information about what driver to use and what options to pass to OSProfiler is now stored in the OpenStack services' configuration files. An example of such a configuration can be found below: .. code-block:: bash [profiler] enabled = True trace_sqlalchemy = True hmac_keys = SECRET_KEY connection_string = messaging:// If such a configuration is provided, OSProfiler can be set up in the following way: .. code-block:: python if CONF.profiler.enabled: osprofiler_initializer.init_from_conf( conf=CONF, context=context.get_admin_context().to_dict(), project="cinder", service=binary, host=host ) Initialization of profiler. --------------------------- If the profiler is not initialized, all calls to **profiler.start()** and **profiler.stop()** will be ignored. Initialization is quite a simple procedure. .. code-block:: python from osprofiler import profiler profiler.init("SECRET_HMAC_KEY", base_id=<base_id>, parent_id=<parent_id>) ``SECRET_HMAC_KEY`` - will be discussed later, because it's related to the integration of OSprofiler & OpenStack. **base_id** and **trace_id** will be used to initialize stack_trace in profiler, e.g. ``stack_trace = [base_id, trace_id]``. OSProfiler CLI. --------------- To make it easier for end users to work with the profiler from the CLI, OSProfiler has an entry point that allows them to retrieve information about traces and present it in human-readable form. Available commands: * Help message with all available commands and their arguments: .. parsed-literal:: $ osprofiler -h/--help * OSProfiler version: .. parsed-literal:: $ osprofiler -v/--version * Results of profiling can be obtained in JSON (option: ``--json``) and HTML (option: ``--html``) formats: .. parsed-literal:: $ osprofiler trace show <trace_id> --json/--html hint: the ``--out`` option will redirect the result of ``osprofiler trace show`` to the specified file: .. parsed-literal:: $ osprofiler trace show <trace_id> --json/--html --out /path/to/file * In the latest versions of OSProfiler with storage drivers (e.g. MongoDB (URI: ``mongodb://``), Messaging (URI: ``messaging://``), and Ceilometer (URI: ``ceilometer://``)) the ``--connection-string`` parameter should be set: .. parsed-literal:: $ osprofiler trace show <trace_id> --connection-string=<connection_string> --json/--html osprofiler-1.15.2/doc/source/user/index.rst0000666000175100017510000000117113241117762020702 0ustar zuulzuul00000000000000================ Using OSProfiler ================ OSProfiler provides a tiny but powerful library that is used by most (soon to be all) OpenStack projects and their python clients. It provides functionality to generate one trace per request that goes through all involved services. This trace can then be extracted and used to build a tree of calls which can be quite handy for a variety of reasons (for example in isolating cross-project performance issues). .. toctree:: :maxdepth: 2 background api integration collectors similar_projects Release Notes ============= ..
toctree:: :maxdepth: 1 history osprofiler-1.15.2/doc/source/user/background.rst0000666000175100017510000000227713241117762021716 0ustar zuulzuul00000000000000========== Background ========== OpenStack consists of multiple projects. Each project, in turn, is composed of multiple services. To process some request, e.g. to boot a virtual machine, OpenStack uses multiple services from different projects. If something works too slowly, it's extremely complicated to understand what exactly goes wrong and to locate the bottleneck. To resolve this issue, we introduce a tiny but powerful library, **osprofiler**, that is going to be used by all OpenStack projects and their python clients. It generates one trace per request that goes through all involved services, and builds a tree of calls. Why not cProfile etc.? ------------------------- **The scope of this library is quite different:** * We are interested in getting one trace of points from different services, not tracing all Python calls inside one process. * This library should be easily integrable into OpenStack. This means that: * It shouldn't require too many changes in code bases of projects it's integrated with. * We should be able to fully turn it off. * We should be able to keep it turned on in lazy mode in production (e.g. an admin should be able to "trace" on request). osprofiler-1.15.2/doc/source/Makefile0000666000175100017510000001517213241117762017525 0ustar zuulzuul00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # User-friendly check for sphinx-build ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) $(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) endif # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .
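# The targets below are the standard Sphinx documentation targets; for this
# tree "make html" is the usual entry point and writes its output to
# $(BUILDDIR)/html.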
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/osprofiler.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/osprofiler.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/osprofiler" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/osprofiler" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." 
$(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." osprofiler-1.15.2/doc/source/conf.py0000666000175100017510000001722313241117762017367 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # # OSprofiler documentation build configuration file, created by # sphinx-quickstart on Fri Jan 10 23:19:18 2014. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.extend([ os.path.abspath('../..'), ]) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
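# 'openstackdocstheme' (the last entry below) is not a sphinx.ext.* module;
# it supplies the 'openstackdocs' HTML theme and consumes the
# repository_name/bug_project/bug_tag options set further down.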
extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', 'openstackdocstheme', ] # openstackdocstheme options repository_name = 'openstack/osprofiler' bug_project = 'osprofiler' bug_tag = '' todo_include_todos = True # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'OSprofiler' copyright = u'2016, OpenStack Foundation' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%Y-%m-%d %H:%M' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. 
#html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', '%s.tex' % project, u'%s Documentation' % project, u'OpenStack Foundation', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). #man_pages = [ # ('index', 'rally', u'Rally Documentation', # [u'Rally Team'], 1) #] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'OSprofiler', u'OSprofiler Documentation', u'OSprofiler Team', 'OSprofiler', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' osprofiler-1.15.2/doc/source/index.rst0000666000175100017510000000124713241117762017730 0ustar zuulzuul00000000000000============================================= OSProfiler -- Cross-project profiling library ============================================= OSProfiler provides a tiny but powerful library that is used by most (soon to be all) OpenStack projects and their python clients. 
It provides functionality to generate 1 trace per request that goes through all involved services. This trace can then be extracted and used to build a tree of calls, which can be quite handy for a variety of reasons (for example in isolating cross-project performance issues). .. toctree:: :maxdepth: 2 user/index .. rubric:: Indices and tables * :ref:`genindex` * :ref:`modindex` * :ref:`search` osprofiler-1.15.2/doc/specs/0000775000175100017510000000000013241120161015661 5ustar zuulzuul00000000000000osprofiler-1.15.2/doc/specs/implemented/0000775000175100017510000000000013241120161020164 5ustar zuulzuul00000000000000osprofiler-1.15.2/doc/specs/implemented/README.rst0000666000175100017510000000044213241117762021672 0ustar zuulzuul00000000000000OSprofiler Implemented Specs ============================ Specs are detailed descriptions of proposed changes in the project. Usually they answer what, why, and how to change in the project, and who is going to work on the change. This directory contains files with implemented specs; 1 file is 1 spec. osprofiler-1.15.2/doc/specs/implemented/multi_backend_support.rst0000666000175100017510000000512213241120010025310 0ustar zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode .. This template should be in ReSTructured text. The filename in the git repository should match the launchpad URL, for example a URL of https://blueprints.launchpad.net/heat/+spec/awesome-thing should be named awesome-thing.rst . Please do not delete any of the sections in this template. If you have nothing to say for a whole section, just write: None For help with syntax, see http://sphinx-doc.org/rest.html To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html ===================== Multi backend support ===================== Make OSProfiler more flexible and production-ready. Problem description =================== Currently OSprofiler works with only one backend, Ceilometer, which actually doesn't work well and adds huge overhead. Moreover, Ceilometer is often not installed/used at all. To resolve this we should add support for different backends like: MongoDB, InfluxDB, ElasticSearch, ... Proposed change =============== Add a new osprofiler.drivers mechanism; each driver will do 2 things: send notifications and parse all notifications into a unified tree structure that can be processed by the REST lib. Deprecate osprofiler.notifiers and osprofiler.parsers Change all projects that are using OSprofiler to the new model Alternatives ------------ I don't know any good alternative. Implementation ============== Assignee(s) ----------- Primary assignees: dbelova ayelistratov Work Items ---------- To add support for multiple backends we should change a few places in osprofiler that are hardcoded on Ceilometer: - CLI command ``show``: I believe we should add an extra argument "connection_string" which will allow people to specify where the backend is. So it will look like: ://[[user[:password]]@[address][:port][/database]] (a short parsing sketch is included below) - Merge osprofiler.notifiers and osprofiler.parsers into osprofiler.drivers Notifiers and Parsers are tightly related: for the MongoDB notifier you should use the MongoDB parser, so it is better to keep both in the same place. This change should be done while keeping backward compatibility; in other words we should create a separate directory osprofiler.drivers, put Ceilometer there first and then start working on other backends.
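A short sketch of how such a connection string could be split into a driver name and its location is shown below; ``parse_connection_string()`` is a hypothetical illustration helper, not the actual osprofiler.drivers entry point, so treat it only as an assumption about how the proposed format could be handled::

    from urllib.parse import urlparse  # six.moves.urllib.parse on Python 2

    def parse_connection_string(connection_string):
        """Split '<driver>://[[user[:password]]@[address][:port][/database]]'."""
        parsed = urlparse(connection_string)
        return {
            "driver": parsed.scheme or "messaging",  # e.g. mongodb, redis, elasticsearch
            "user": parsed.username,
            "address": parsed.hostname,
            "port": parsed.port,
            "database": parsed.path.lstrip("/") or None,
        }

    # -> {'driver': 'mongodb', 'user': None, 'address': '127.0.0.1',
    #     'port': 27017, 'database': 'profiler'}
    print(parse_connection_string("mongodb://127.0.0.1:27017/profiler"))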
These drivers will be chosen based on connection string - Deprecate osprofiler.notifiers and osprofiler.parsers - Switch all projects to new model with connection string Dependencies ============ - Cinder, Glance, Trove, Heat should be changed osprofiler-1.15.2/doc/specs/implemented/make_paste_ini_config_optional.rst0000666000175100017510000000452513241120010027121 0ustar zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode .. This template should be in ReSTructured text. The filename in the git repository should match the launchpad URL, for example a URL of https://blueprints.launchpad.net/heat/+spec/awesome-thing should be named awesome-thing.rst . Please do not delete any of the sections in this template. If you have nothing to say for a whole section, just write: None For help with syntax, see http://sphinx-doc.org/rest.html To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html ====================================== Make api-paste.ini Arguments Optional ====================================== Problem description =================== Integration of OSprofiler with OpenStack projects is harder than it should be, it requires keeping part of arguments inside api-paste.ini files and part in projects.conf file. We should make all configuration options from api-paste.ini file optional and add alternative way to configure osprofiler.web.WsgiMiddleware Proposed change =============== Integration of OSprofiler requires 2 changes in api-paste.ini file: - One is adding osprofiler.web.WsgiMiddleware to pipelines: https://github.com/openstack/cinder/blob/master/etc/cinder/api-paste.ini#L13 - Another is to add it's arguments: https://github.com/openstack/cinder/blob/master/etc/cinder/api-paste.ini#L31-L32 so WsgiMiddleware will be correctly initialized here: https://github.com/openstack/osprofiler/blob/51761f375189bdc03b7e72a266ad0950777f32b1/osprofiler/web.py#L64 We should make ``hmac_keys`` and ``enabled`` variable optional, create separated method from initialization of wsgi middleware and cut new release. After that remove Alternatives ------------ None. Implementation ============== Assignee(s) ----------- Primary assignee: dbelova Work Items ---------- - Modify osprofiler.web.WsgiMiddleware to make ``hmac_keys`` optional (done) - Add alternative way to setup osprofiler.web.WsgiMiddleware, e.g. extra argument hmac_keys to enable() method (done) - Cut new release 0.3.1 (tbd) - Fix the code in all projects: remove api-paste.ini arguments and use osprofiler.web.enable with extra argument (tbd) Dependencies ============ - Cinder, Glance, Trove - projects should be fixed osprofiler-1.15.2/doc/specs/template.rst0000666000175100017510000000451713241120010020230 0ustar zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode .. This template should be in ReSTructured text. The filename in the git repository should match the launchpad URL, for example a URL of https://blueprints.launchpad.net/heat/+spec/awesome-thing should be named awesome-thing.rst . Please do not delete any of the sections in this template. 
If you have nothing to say for a whole section, just write: None For help with syntax, see http://sphinx-doc.org/rest.html To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html ======================= The title of your Spec ======================= Introduction paragraph -- why are we doing anything? Problem description =================== A detailed description of the problem. Proposed change =============== Here is where you cover the change you propose to make in detail. How do you propose to solve this problem? If this is one part of a larger effort make it clear where this piece ends. In other words, what's the scope of this effort? Include where in the heat tree hierarchy this will reside. Alternatives ------------ This is an optional section, where it does apply we'd just like a demonstration that some thought has been put into why the proposed approach is the best one. Implementation ============== Assignee(s) ----------- Who is leading the writing of the code? Or is this a blueprint where you're throwing it out there to see who picks it up? If more than one person is working on the implementation, please designate the primary author and contact. Primary assignee: Can optionally can list additional ids if they intend on doing substantial implementation work on this blueprint. Work Items ---------- Work items or tasks -- break the feature up into the things that need to be done to implement it. Those parts might end up being done by different people, but we're mostly trying to understand the timeline for implementation. Dependencies ============ - Include specific references to specs and/or blueprints in heat, or in other projects, that this one either depends on or is related to. - Does this feature require any new library dependencies or code otherwise not included in OpenStack? Or does it depend on a specific version of library? osprofiler-1.15.2/doc/specs/README.rst0000666000175100017510000000054213241117762017370 0ustar zuulzuul00000000000000OSProfiler Specs ================ Specs are detailed description of proposed changes in project. Usually they answer on what, why, how to change in project and who is going to work on change. This directory contains 2 subdirectories: - in-progress - These specs are approved, but they are not implemented yet - implemented - Implemented specs archive osprofiler-1.15.2/doc/specs/in-progress/0000775000175100017510000000000013241120161020131 5ustar zuulzuul00000000000000osprofiler-1.15.2/doc/specs/in-progress/better_devstack_integration.rst0000666000175100017510000000263513241120010026440 0ustar zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode .. This template should be in ReSTructured text. The filename in the git repository should match the launchpad URL, for example a URL of https://blueprints.launchpad.net/heat/+spec/awesome-thing should be named awesome-thing.rst . Please do not delete any of the sections in this template. 
If you have nothing to say for a whole section, just write: None For help with syntax, see http://sphinx-doc.org/rest.html To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html ============================ Better DevStack Integration ============================ Make enabling OSprofiler as simple as enabling the DEBUG log level. Problem description =================== It's hard to turn on OSProfiler in DevStack: you have to change notification_topic, enable Ceilometer and, in the future, do other magic. Also, if something goes wrong it's hard to debug. Proposed change =============== Add a single argument: PROFILING=True/False Alternatives ------------ Do nothing and keep things hard. Implementation ============== Assignee(s) ----------- Primary assignee: boris-42 Work Items ---------- - Make a DevStack plugin for OSprofiler - Configure Ceilometer - Configure services that support OSprofiler Dependencies ============ - DevStack osprofiler-1.15.2/doc/specs/in-progress/integration_testing.rst0000666000175100017510000000306313241120010024740 0ustar zuulzuul00000000000000.. This work is licensed under a Creative Commons Attribution 3.0 Unported License. http://creativecommons.org/licenses/by/3.0/legalcode .. This template should be in ReSTructured text. The filename in the git repository should match the launchpad URL, for example a URL of https://blueprints.launchpad.net/heat/+spec/awesome-thing should be named awesome-thing.rst . Please do not delete any of the sections in this template. If you have nothing to say for a whole section, just write: None For help with syntax, see http://sphinx-doc.org/rest.html To test out your formatting, see http://www.tele3.cz/jbar/rest/rest.html =================== Integration Testing =================== We should create a DSVM job that checks that proposed changes in OSprofiler don't break projects that are using OSProfiler. Problem description =================== Currently we don't have CI for testing that OSprofiler changes are backward compatible and don't break projects that are using OSprofiler. In other words, without this job, each time we release OSProfiler we can break some OpenStack projects, which is quite bad. Proposed change =============== Create a DSVM job that will install OSprofiler with the proposed patch instead of the latest release and run some basic tests. Alternatives ------------ Do nothing and break OpenStack. Implementation ============== Assignee(s) ----------- Primary assignee: Work Items ---------- - Create DSVM job - Run Rally tests to make sure that everything works Dependencies ============ None osprofiler-1.15.2/doc/specs/in-progress/README.rst0000666000175100017510000000046213241117762021641 0ustar zuulzuul00000000000000OSprofiler In-Progress Specs ============================ Specs are detailed descriptions of proposed changes in the project. Usually they answer what, why, and how to change in the project, and who is going to work on the change. This directory contains files with accepted but not yet implemented specs; 1 file is 1 spec. osprofiler-1.15.2/releasenotes/0000775000175100017510000000000013241120161016470 5ustar zuulzuul00000000000000osprofiler-1.15.2/releasenotes/notes/0000775000175100017510000000000013241120161017620 5ustar zuulzuul00000000000000osprofiler-1.15.2/releasenotes/notes/add-reno-996dd44974d53238.yaml0000666000175100017510000000007213241117762024235 0ustar zuulzuul00000000000000--- other: - Introduce reno for deployer release notes.
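To make the integration testing spec above a bit more concrete, below is a minimal smoke-test sketch of the kind of "basic tests" such a DSVM job could run. It assumes the 1.x API surface (``profiler.init``, the ``profiler.Trace`` context manager and the ``notifier.set()`` hook) behaves as referenced elsewhere in this tree; it is illustrative only and is not the actual gate job::

    from osprofiler import notifier
    from osprofiler import profiler

    collected = []

    def _collect(payload):
        # Keep every emitted trace point so the test can assert on it.
        collected.append(payload)

    # Route notifications to the in-memory collector instead of a real backend.
    notifier.set(_collect)
    profiler.init(hmac_key="SECRET_KEY")

    with profiler.Trace("db_call", info={"query": "SELECT 1"}):
        pass  # the profiled work would happen here

    names = [point["name"] for point in collected]
    assert "db_call-start" in names
    assert "db_call-stop" in names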
osprofiler-1.15.2/releasenotes/source/0000775000175100017510000000000013241120161017770 5ustar zuulzuul00000000000000osprofiler-1.15.2/releasenotes/source/_templates/0000775000175100017510000000000013241120161022125 5ustar zuulzuul00000000000000osprofiler-1.15.2/releasenotes/source/_templates/.placeholder0000666000175100017510000000000013241117762024415 0ustar zuulzuul00000000000000osprofiler-1.15.2/releasenotes/source/ocata.rst0000666000175100017510000000023013241117762021623 0ustar zuulzuul00000000000000=================================== Ocata Series Release Notes =================================== .. release-notes:: :branch: origin/stable/ocata osprofiler-1.15.2/releasenotes/source/unreleased.rst0000666000175100017510000000016013241117762022665 0ustar zuulzuul00000000000000============================== Current Series Release Notes ============================== .. release-notes:: osprofiler-1.15.2/releasenotes/source/conf.py0000666000175100017510000002144013241117762021307 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'openstackdocstheme', 'reno.sphinxext', ] # openstackdocstheme options repository_name = 'openstack/osprofiler' bug_project = 'osprofiler' bug_tag = '' # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'osprofiler Release Notes' copyright = u'2016, osprofiler Developers' # Release notes do not need a version in the title, they span # multiple versions. # The full version, including alpha/beta/rc tags. release = '' # The short X.Y version. version = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. 
# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%Y-%m-%d %H:%M' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
# html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'osprofilerReleaseNotesDoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'osprofilerReleaseNotes.tex', u'osprofiler Release Notes Documentation', u'osprofiler Developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'osprofilerReleaseNotes', u'osprofiler Release Notes Documentation', [u'osprofiler Developers'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'osprofilerReleaseNotes', u'osprofiler Release Notes Documentation', u'osprofiler Developers', 'osprofilerReleaseNotes', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] osprofiler-1.15.2/releasenotes/source/_static/0000775000175100017510000000000013241120161021416 5ustar zuulzuul00000000000000osprofiler-1.15.2/releasenotes/source/_static/.placeholder0000666000175100017510000000000013241117762023706 0ustar zuulzuul00000000000000osprofiler-1.15.2/releasenotes/source/index.rst0000666000175100017510000000022313241120010021621 0ustar zuulzuul00000000000000========================== osprofiler Release Notes ========================== .. 
toctree:: :maxdepth: 1 unreleased pike ocata osprofiler-1.15.2/releasenotes/source/pike.rst0000666000175100017510000000021713241117762021471 0ustar zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike osprofiler-1.15.2/devstack/0000775000175100017510000000000013241120161015603 5ustar zuulzuul00000000000000osprofiler-1.15.2/devstack/lib/0000775000175100017510000000000013241120161016351 5ustar zuulzuul00000000000000osprofiler-1.15.2/devstack/lib/osprofiler0000666000175100017510000000431713241120010020460 0ustar zuulzuul00000000000000#!/bin/bash # lib/osprofiler # Functions to control the configuration and operation of the **OSProfiler** # Save trace setting XTRACE=$(set +o | grep xtrace) set +o xtrace # Defaults # -------- CONF_FILES=( $CINDER_CONF $HEAT_CONF $KEYSTONE_CONF $NOVA_CONF $NEUTRON_CONF $GLANCE_API_CONF $GLANCE_REGISTRY_CONF $TROVE_CONF $TROVE_CONDUCTOR_CONF $TROVE_GUESTAGENT_CONF $TROVE_TASKMANAGER_CONF $SENLIN_CONF $MAGNUM_CONF $ZUN_CONF ) # This will update CEILOMETER_NOTIFICATION_TOPICS in ceilometer.conf file export CEILOMETER_NOTIFICATION_TOPICS=notifications,profiler # Functions # --------- function install_redis() { if is_fedora; then install_package redis python-redis elif is_ubuntu; then install_package redis-server python-redis elif is_suse; then install_package redis python-redis else exit_distro_not_supported "redis installation" fi start_service redis } function install_osprofiler_collector() { if [ -z "$OSPROFILER_COLLECTOR" ]; then OSPROFILER_CONNECTION_STRING=${OSPROFILER_CONNECTION_STRING:-"messaging://"} elif [ "$OSPROFILER_COLLECTOR" == "redis" ]; then install_redis OSPROFILER_CONNECTION_STRING=${OSPROFILER_CONNECTION_STRING:-"redis://localhost:6379"} else die $LINENO "OSProfiler collector $OSPROFILER_COLLECTOR is not supported" fi } function configure_osprofiler() { for conf in ${CONF_FILES[@]}; do if [ -f $conf ] then iniset $conf profiler enabled True iniset $conf profiler trace_sqlalchemy True iniset $conf profiler hmac_keys $OSPROFILER_HMAC_KEYS iniset $conf profiler connection_string $OSPROFILER_CONNECTION_STRING fi done # Insert osprofiler filter into Neutron paste configuration if [ -f $Q_API_PASTE_FILE ]; then VAL=$(iniget $Q_API_PASTE_FILE composite:neutronapi_v2_0 keystone) VAL=${VAL/catch_errors/catch_errors osprofiler} iniset $Q_API_PASTE_FILE composite:neutronapi_v2_0 keystone "$VAL" fi if [ -f $CEILOMETER_CONF ] then iniset $CEILOMETER_CONF event store_raw info fi } # Restore xtrace $XTRACE osprofiler-1.15.2/devstack/README.rst0000666000175100017510000000644513241120010017276 0ustar zuulzuul00000000000000================================== Enabling OSProfiler using DevStack ================================== This directory contains the files necessary to run OpenStack with enabled OSProfiler in DevStack. OSProfiler can send trace data into different collectors. There are 2 parameters that control this: * ``OSPROFILER_COLLECTOR`` specifies which collector to install in DevStack. By default OSProfiler plugin does not install anything, thus default messaging driver with Ceilometer storage will be used. Possible values: * ```` - default messaging driver with Ceilometer is used * ``redis`` - Redis is installed The default value of ``OSPROFILER_CONNECTION_STRING`` is set automatically depending on ``OSPROFILER_COLLECTOR`` value. * ``OSPROFILER_CONNECTION_STRING`` specifies which driver is used by OSProfiler. 
Possible values: * ``messaging://`` - use messaging as trace collector (with the transport configured by oslo.messaging) * ``redis://host:port`` - use Redis as trace storage * ``elasticsearch://host:port`` - use Elasticsearch as trace storage * ``mongodb://host:port`` - use MongoDB as trace storage * ``loginsight://username:password@host`` - use LogInsight as trace collector/storage To configure DevStack and enable OSProfiler edit ``${DEVSTACK_DIR}/local.conf`` file and add the following to ``[[local|localrc]]`` section: * to use Redis collector:: enable_plugin osprofiler https://git.openstack.org/openstack/osprofiler master OSPROFILER_COLLECTOR=redis OSProfiler plugin will install Redis and configure OSProfiler to use Redis driver * to use specified driver:: enable_plugin osprofiler https://git.openstack.org/openstack/osprofiler master OSPROFILER_CONNECTION_STRING= the driver is chosen depending on the value of ``OSPROFILER_CONNECTION_STRING`` variable (refer to the next section for details) * to use default Ceilometer driver:: enable_plugin panko https://git.openstack.org/openstack/panko master enable_plugin ceilometer https://git.openstack.org/openstack/ceilometer master enable_plugin osprofiler https://git.openstack.org/openstack/osprofiler master Note: the order of enabling plugins matters. Run DevStack as normal:: $ ./stack.sh Config variables ---------------- **OSPROFILER_HMAC_KEYS** - a set of HMAC secrets, that are used for triggering of profiling in OpenStack services: only the requests that specify one of these keys in HTTP headers will be profiled. E.g. multiple secrets are specified as a comma-separated list of string values:: OSPROFILER_HMAC_KEYS=swordfish,foxtrot,charlie **OSPROFILER_CONNECTION_STRING** - connection string to identify the driver. Default value is ``messaging://`` refers to Ceilometer driver. For a full list of drivers please refer to ``http://git.openstack.org/cgit/openstack/osprofiler/tree/osprofiler/drivers``. Example: enable ElasticSearch driver with the server running on localhost:: OSPROFILER_CONNECTION_STRING=elasticsearch://127.0.0.1:9200 **OSPROFILER_COLLECTOR** - controls which collector to install into DevStack. The driver is then chosen automatically based on the collector. Empty value assumes that the default messaging driver with Ceilometer is used. Example: enable Redis collector:: OSPROFILER_COLLECTOR=redis osprofiler-1.15.2/devstack/settings0000666000175100017510000000046213241117762017407 0ustar zuulzuul00000000000000# Devstack settings # A comma-separated list of secrets, that will be used for triggering # of profiling in OpenStack services: profiling is only performed for # requests that specify one of these keys in HTTP headers. 
OSPROFILER_HMAC_KEYS=${OSPROFILER_HMAC_KEYS:-"SECRET_KEY"} enable_service osprofiler osprofiler-1.15.2/devstack/plugin.sh0000666000175100017510000000074213241117762017457 0ustar zuulzuul00000000000000#!/bin/bash # DevStack extras script to install osprofiler # Save trace setting XTRACE=$(set +o | grep xtrace) set -o xtrace source $DEST/osprofiler/devstack/lib/osprofiler if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then echo_summary "Configuring system services for OSProfiler" install_osprofiler_collector elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring OSProfiler" configure_osprofiler fi # Restore xtrace $XTRACE osprofiler-1.15.2/README.rst0000666000175100017510000000246213241117762015511 0ustar zuulzuul00000000000000======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/tc/badges/osprofiler.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on =========================================================== OSProfiler -- Library for cross-project profiling library =========================================================== .. image:: https://img.shields.io/pypi/v/osprofiler.svg :target: https://pypi.python.org/pypi/osprofiler/ :alt: Latest Version .. image:: https://img.shields.io/pypi/dm/osprofiler.svg :target: https://pypi.python.org/pypi/osprofiler/ :alt: Downloads OSProfiler provides a tiny but powerful library that is used by most (soon to be all) OpenStack projects and their python clients. It provides functionality to be able to generate 1 trace per request, that goes through all involved services. This trace can then be extracted and used to build a tree of calls which can be quite handy for a variety of reasons (for example in isolating cross-project performance issues). 
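As a rough illustration of that one-trace-per-request flow, here is a minimal usage sketch built on helpers referenced in the ChangeLog below (``osprofiler.web.enable``, ``profiler.init`` and the ``@profiler.trace`` decorator). Exact signatures may vary between releases, so read it as an approximation rather than canonical usage::

    from osprofiler import profiler
    from osprofiler import web

    # Let the WSGI middleware pick up signed X-Trace-Info headers so a
    # caller's trace continues into this service.
    web.enable(hmac_keys="SECRET_KEY")

    # Start (or continue) the trace for the current request.
    profiler.init(hmac_key="SECRET_KEY")

    @profiler.trace("select_availability_zone")
    def select_availability_zone():
        # Each traced call becomes one node in the resulting tree of calls.
        return "nova"

    select_availability_zone()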
* Free software: Apache license * Documentation: https://docs.openstack.org/osprofiler/latest/ * Source: https://git.openstack.org/cgit/openstack/osprofiler * Bugs: https://bugs.launchpad.net/osprofiler osprofiler-1.15.2/tox.ini0000666000175100017510000000257013241117762015335 0ustar zuulzuul00000000000000[tox] minversion = 1.6 skipsdist = True envlist = py35,py27,pep8 [testenv] setenv = VIRTUAL_ENV={envdir} LANG=en_US.UTF-8 LANGUAGE=en_US:en LC_ALL=C deps = .[oslo_config] -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt install_command = pip install -U {opts} {packages} usedevelop = True commands = python setup.py testr --slowest --testr-args='{posargs}' distribute = false [testenv:functional] basepython = python2.7 setenv = {[testenv]setenv} OS_TEST_PATH=./osprofiler/tests/functional deps = {[testenv]deps} oslo.messaging [testenv:functional-py35] basepython = python3.5 setenv = {[testenv:functional]setenv} deps = {[testenv:functional]deps} [testenv:pep8] commands = flake8 # Run security linter bandit -r osprofiler -n5 distribute = false [testenv:venv] commands = {posargs} [testenv:cover] commands = python setup.py testr --coverage --testr-args='{posargs}' [testenv:docs] commands = python setup.py build_sphinx [testenv:bandit] commands = bandit -r osprofiler -n5 [flake8] show-source = true builtins = _ exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,tools,setup.py,build,releasenotes [hacking] local-check-factory = osprofiler.hacking.checks.factory [testenv:releasenotes] commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html osprofiler-1.15.2/bindep.txt0000666000175100017510000000012313241117762016014 0ustar zuulzuul00000000000000rabbitmq-server [test] redis [test platform:rpm] redis-server [test platform:dpkg] osprofiler-1.15.2/ChangeLog0000664000175100017510000002271713241120160015561 0ustar zuulzuul00000000000000CHANGES ======= 1.15.2 ------ * Check profiler instance before initialize * Update .gitreview for stable/queens 1.15.1 ------ 1.15.0 ------ * Update the invalid doc links to the right ones in osprofiler docs * Add filter for OSprofiler html output * Add kwargs to WsgiMiddleware \_\_init\_\_ * Make collector configurable in DevStack plugin * Add functional test for Redis driver * Remove setting of version/release from releasenotes * Add Zuul job for functional testing 1.14.0 ------ * Extend messaging driver to support reporting * Handle and report SQLAlchemy errors 1.13.0 ------ * Remove dependency on oslo.log library 1.12.0 ------ * Do not require OpenStack authentication to run osprofiler CLI * Make dependency on oslo.messaging runtime only * Make test\_notifier independent of test case execution order * Add function/sql results to trace info * Improve unit test coverage * Remove unused parameters from Profiler class * Add loading local static files option of template.html * Update reno for stable/pike 1.11.0 ------ * Update URLs in documents according to document migration * doc: Fix formatting * rearrange existing documentation to fit the new standard layout * Switch from oslosphinx to openstackdocstheme * Enable warning-is-error in doc build * Update .gitignore 1.10.1 ------ * Expose connection\_string parameter into DevStack plugin 1.10.0 ------ * Cleanup code of DevStack plugin * Improve error reporting for Ceilometer driver * Replace oslo.messaging.get\_transport with get\_notification\_transport 1.9.1 ----- * devstack: use project conf file env variables 1.9.0 ----- * Fix error message for 
invalid trace * Remove unused imports 1.8.0 ----- * Switch to "topics" keyword for messaging driver * Add zun to devstack config * Python 3.4 support is removed 1.7.0 ----- * Highlight last trace for OSprofiler html output 1.6.0 ----- * Add magnum to devstack config * Add Jaeger to list of similar projects * [Fix gate]Update test requirement * fix an outdated link for zipkin * Change some bindings to one-time bindings * Revert "Change list\_opts to dictionary style" * Move implemeted specs to implemented directory * Remove extra white spaces in json output * Increase angular digest iteration limit * devstack: make option hmac\_keys configurable * Update reno for stable/ocata * Change list\_opts to dictionary style * Fix mistake in split meta string * Fix enabling order specify in README.rst 1.5.0 ----- * Add py35 tox virtualenv * Add functional test for notifier backend * Upgrade libraries, add highlight for JSON data * Fix syntax in JS, JSON indent with 4 spaces * Pass oslo.messaging kwargs only for "messaging://" * Organize unit tests under tests/unit folder * Use uuidutils instead of uuid.uuid4() * Use oslo\_utils.uuidutils.is\_uuid\_like * Error out for invalid trace ID * Re-format html template * Replace six.iteritems() with .items() * Show team and repo badges on README * Fix import order * Move hacking checks outside tests * Visualize trace output as graph * Remove print statement * Pretty print json output * Add exception to trace point details * Add a redis driver * Replace logging with oslo\_log * Add Log Insight driver * Add reno for release notes management * Use method constant\_time\_compare from oslo.utils * Update documentation to the latest state * Update dependencies' version from project requirements * Update devstack plugin readme to enable Panko * Enable devstack to configure OSProfiler for Senlin project * Add .idea folder to .gitignore * Heat and Cinder now use new style conf * Fix the issue that ChangeLog not found when building docs * Add AUTHORS and ChangeLog to .gitignore * Update the driver path in th doc * Use an env variable for connection string default * Fix a doc typo * Update homepage with developer documentation page * Trivial: Remove vim header from source files * [doc]Add description for multi-backend URI * Add Elasticsearch driver * Remove old notifiers 1.4.0 ----- * Add tests for mongodb driver * Add connection string usage to osprofiler-cli * Add overall profiler stats by operation * Fix typos on spec directory * Fix title of index page * Add MongoDB driver * OSprofiler initialization method * Add Ceilometer driver * Add backward compatible drivers structure * Expose osprofiler middleware as entrypoint * Remove discover from test-requirements * Fix typo: 'Olso' to 'Oslo' * Don't set html\_last\_updated\_fmt without git * Add exception type to stop trace info 1.3.0 ----- * Add hepler to trace sessions * doc: Log warning when can't get informaiton from git * Add an error tip when trace\_id is not found * Add a similar link with reference to similar projects/libraries * Continue work on standardizing osprofiler docs * Remove dead/broken link to example * Updates to doc conf.py to look the same as other projects * Clean thread local profiler object after usage * Improve unit test coverage * Avoid tracing class and static methods * Avoid multiple tracing when applying meta or class decorator * Remove outdated version * Dont claim copyright for future years * Use pkg\_resources to get version * Enable bandit in gate * Fallback if git is absent * It's 
unnecessary set deprecate group for option 'enabled' * Add CONTRIBUTING.rst 1.2.0 ----- * Remove flake8 ignore list in tox.ini 1.1.0 ----- * run py34 tests before py27 to work around testr bug * stop making a copy of options discovered by config generator * Make class detection more accurate 1.0.1 ----- * Disable staticmethods tracing 1.0.0 ----- * Add fix for static and class methods in @trace\_cls * Expose X-Trace-\* constants * Add raw Ceilometer events support to DevStack plugin * Use raw data storage for events to collect more info * Use oslo.utils reflection and avoid refinding decorated name * Move osprofiler tests into osprofiler * Consolidate osprofiler options * Remove argparse from requirements * Add py34 to tox env list * Make profiler timestamp json.dumps friendly * Replace deprecated library function os.popen() with subprocess * Add DevStack plugin 0.4.0 ----- * Remove Py33 support * Make it possible to specify file path as a source for trace * Remove support for py26 * Improve HTML reports performance * Fix TracedMeta class * Fix a couple of typos in doc strings * Fix Ceilometer parser to use events * remove python 2.6 trove classifier * Add TracedMeta class * Update requirements * Deprecated tox -downloadcache option removed * Fix enable/disable compatibility * Add hacking rules & fix hacking issues 0.3.1 ----- * Make api-paste.ini config optional * Fix minor typos in the multi-backend specification * Spec: Integration Testing * Spec: Better DevStack Integration * Spec: Multi Backend support * Spec: Optional options in api-paste.ini * Add specs base structure * Update .gitreview for new namespace * Fix date parsing when there's not milliseconds in the date * Various cleanups * Remove version from setup.cfg * Stop using intersphinx * Rename doc environment to docs * Imporve generated trace html * Adding a hits to notice operator when trace not found 0.3.0 ----- * Cut version 0.3.0 * add more unit tests * Allow N-keys (one should apply) * Some minor fixes in README.rst * ReadMe updates with CLI commands * Add entry point for OSProfiler, that display traces * Remove dead code * Add OSprofiler docs * Fix wrong code duplication in utils.itersubclasses() * Use compare\_digest or an equivalent when available 0.2.5 ----- * Imporve read me * Fix issue with trace\_cls * Add @profiler.trace\_cls decorator * Prevent Messaging to resend failed notifications * Update README.rst with some small adjustments * Some grammar-related imprevements 0.2.4 ----- * Add alternative way to dissable middleware * Improve tracing of sqlalchemy 0.2.3 ----- * Fix ceilometer parse notifications 0.2.2 ----- * Improve a bit README.rst * Fix & improve trace decorator * Fix some typos in README.rst 0.2.1 ----- * Update README.rst * Add @profiler.trace decorator * Add missing tests for messaging notifer plugin 0.2.0 ----- * Add notifier plugin based on Ceilometer * Add base for notifier plugins * Make profiler.utils private * Improve ceilometer notifications getter * Move public methods to top of sqlalchemy module * Refactor web.add\_trace\_id\_header() * Make a cleaner API for osporfiler * Add "\_" to names of private methods 0.1.1 ----- * Remove unused libs from requirments and fix info in setup.cfg 0.1.0 ----- * Add extra docs in sqlalchemy module * Make hmac required argument in profiler.Profiler.init * Refactor WSGI.middleware and imporve test coverage * Improve test coverage * Improve README * Base64 encode the 'X-Trace-Info' header * Fix text requirements * Edit notifier.notify() * Add sanity tests for 
profiler and hmac usage * Imporve ceilometer parser * Split code sugar and logic in Profiler class * Simplify notifer API * Add git review file * Add in hmac signing/verification * Make name also use a deque * Use a collections.deque which has thread safe pop/append 0.0.1 ----- * Add work around if not all messages were consumed by ceilometer * Remove information about service in profiler * Add parser of ceilometer notifications * Fix setup.cfg python 2.6 is supported as well * Add possibility to disable sqlalchemy tracing * Fix WSGI middleware and add unit tests * Remove from sqlachemy.after\_execute notifcation resutls and add UTs * Imporove profiler and add UTs * Update global requirments * Remove unused dependency from requirments.txt * Fix licenses * Fix pep * Add tracer for sqlalchemy * Add WSGI Middleware * Add profiler class * Init Strucutre of lib * Initial commit osprofiler-1.15.2/LICENSE0000666000175100017510000002404113241117762015024 0ustar zuulzuul00000000000000Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. 
For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: You must give any other recipients of the Work or Derivative Works a copy of this License; and You must cause any modified files to carry prominent notices stating that You changed the files; and You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. 
You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) 
The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
osprofiler-1.15.2/PKG-INFO0000664000175100017510000000443213241120161015077 0ustar zuulzuul00000000000000Metadata-Version: 1.1
Name: osprofiler
Version: 1.15.2
Summary: OpenStack Profiler Library
Home-page: https://docs.openstack.org/osprofiler/latest/
Author: OpenStack
Author-email: openstack-dev@lists.openstack.org
License: UNKNOWN
Description-Content-Type: UNKNOWN
Description: ========================
        Team and repository tags
        ========================

        .. image:: https://governance.openstack.org/tc/badges/osprofiler.svg
            :target: https://governance.openstack.org/tc/reference/tags/index.html

        .. Change things from this point on

        ===========================================================
        OSProfiler -- Library for cross-project profiling library
        ===========================================================

        .. image:: https://img.shields.io/pypi/v/osprofiler.svg
            :target: https://pypi.python.org/pypi/osprofiler/
            :alt: Latest Version

        .. image:: https://img.shields.io/pypi/dm/osprofiler.svg
            :target: https://pypi.python.org/pypi/osprofiler/
            :alt: Downloads

        OSProfiler provides a tiny but powerful library that is used by most (soon to be all) OpenStack projects and their python clients. It provides functionality to be able to generate 1 trace per request, that goes through all involved services. This trace can then be extracted and used to build a tree of calls which can be quite handy for a variety of reasons (for example in isolating cross-project performance issues).
        * Free software: Apache license
        * Documentation: https://docs.openstack.org/osprofiler/latest/
        * Source: https://git.openstack.org/cgit/openstack/osprofiler
        * Bugs: https://bugs.launchpad.net/osprofiler

Platform: UNKNOWN
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Developers
Classifier: Intended Audience :: Information Technology
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3.5
osprofiler-1.15.2/test-requirements.txt0000666000175100017510000000107513241120010020236 0ustar zuulzuul00000000000000hacking>=0.12.0,!=0.13.0,<0.14 # Apache-2.0
coverage>=3.6 # Apache-2.0
ddt>=1.0.1 # MIT
mock>=2.0 # BSD
python-subunit>=0.0.18 # Apache-2.0/BSD
testrepository>=0.0.18 # Apache-2.0/BSD
testtools>=1.4.0 # MIT
openstackdocstheme>=1.11.0 # Apache-2.0
sphinx>=1.6.2 # BSD

# Bandit security code scanner
bandit>=1.1.0 # Apache-2.0

python-ceilometerclient>=2.5.0 # Apache-2.0
pymongo>=3.0.2,!=3.1 # Apache-2.0

# Elasticsearch python client
elasticsearch>=2.0.0,<=3.0.0 # Apache-2.0

# Redis python client
redis>=2.10.0 # MIT

# Build release notes
reno>=1.8.0 # Apache-2.0
osprofiler-1.15.2/CONTRIBUTING.rst0000666000175100017510000000103713241117762016450 0ustar zuulzuul00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps in this page:

   https://docs.openstack.org/infra/manual/developers.html

Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at:

   https://docs.openstack.org/infra/manual/developers.html#development-workflow

Pull requests submitted through GitHub will be ignored.

Bugs should be filed on Launchpad, not GitHub:

   https://bugs.launchpad.net/osprofiler
osprofiler-1.15.2/.testr.conf0000666000175100017510000000034613241117762016077 0ustar zuulzuul00000000000000[DEFAULT]
test_command=OS_STDOUT_CAPTURE=1 OS_STDERR_CAPTURE=1 ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./osprofiler/tests/unit} $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
osprofiler-1.15.2/AUTHORS0000664000175100017510000000366613241120160015051 0ustar zuulzuul00000000000000Adam Spiers
Akihiro Motoki
Alexander Ignatyev
Alexey Yelistratov
Andreas Jaeger
Andreas Jaeger
Andrey Kurilin
Atsushi SAKAI
BENJAMIN VANHAVERMAET
Boris Pavlovic
Carlos Goncalves
ChangBo Guo(gcb)
Davanum Srinivas
Dina Belova
Doug Hellmann
Flavio Percoco
Harshada Mangesh Kakad
Hongbin Lu
Ilya Shakhat
Jamie Lennox
Javier Peña
Jeremy Stanley
Joshua Harlow
Joshua Harlow
Mikhail Dubov
Monty Taylor
Moshe Levi
Munoz, Obed N
Oleksii Chuprykov
Ondřej Nový
OpenStack Release Bot
Roman Podoliaka
Simon Pasquier
Thomas Bechtold
Timur Sufiev
Tony Xu
Tovin Seven
Victor Morales
Vipin Balachandran
Vu Cong Tuan
Zhi Yan Liu
Zuul
chenxu <424024687@qq.com>
gecong1973
howardlee
kavithahr
lvdongbing
reedip
ricolin
ritesh.arya
shangxiaobj
uppi
wangxiyuan
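
Editorial note (illustrative, not a file from this archive): the PKG-INFO description above says OSProfiler generates one trace per request and builds a tree of calls from it. A minimal sketch of how application code typically drives that API, assuming the osprofiler.profiler module's init/trace/start/stop interface and no configured notifier (trace points are then simply discarded), might look like the following; the names handle_request, expensive_step and payload are hypothetical.

    # Illustrative sketch only; assumes the osprofiler package is installed.
    from osprofiler import profiler

    @profiler.trace("expensive-step", hide_args=True)
    def expensive_step(data):
        # Each call records start/stop trace points carrying the request's
        # base id, so the whole request can later be rebuilt as a call tree.
        return sorted(data)

    def handle_request(payload):
        # One trace per request: init() sets the HMAC key and the base/parent
        # trace ids that osprofiler propagates to every service it touches.
        profiler.init(hmac_key="SECRET_KEY")
        profiler.start("handle-request", info={"payload.size": len(payload)})
        try:
            return expensive_step(payload)
        finally:
            profiler.stop()

    if __name__ == "__main__":
        print(handle_request([3, 1, 2]))

With a real notifier configured, the same calls would emit events to one of the collector backends shipped in this package, and the osprofiler command-line tool (see entry_points.txt) could then assemble them into the call tree described above.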