python-prometheus-client-0.7.1/.coveragerc

[run]
branch = True
source = prometheus_client
omit = prometheus_client/decorator.py

[paths]
source =
    prometheus_client
    .tox/*/lib/python*/site-packages/prometheus_client
    .tox/pypy/site-packages/prometheus_client

[report]
show_missing = True

python-prometheus-client-0.7.1/.gitignore

build
dist
*.egg-info
*.pyc
*.swp
.coverage.*
.coverage
.tox
.*cache
htmlcov

python-prometheus-client-0.7.1/.travis.yml

sudo: false

cache:
  directories:
    - $HOME/.cache/pip

language: python

matrix:
  include:
    - python: "2.6"
      env: TOXENV=py26
    - python: "2.7"
      env: TOXENV=py27
    - python: "2.7"
      env: TOXENV=py27-nooptionals
    - python: "3.4"
      env: TOXENV=py34
    - python: "3.5"
      env: TOXENV=py35
    - python: "3.6"
      env: TOXENV=py36
    - python: "3.7"
      env: TOXENV=py37
      dist: xenial
      sudo: true
    - python: "3.7"
      env: TOXENV=py37-nooptionals
      dist: xenial
      sudo: true
    - python: "pypy"
      env: TOXENV=pypy
    - python: "pypy3"
      env: TOXENV=pypy3

install:
  - pip install tox

script:
  - tox

notifications:
  email: false

python-prometheus-client-0.7.1/CONTRIBUTING.md

# Contributing

Prometheus uses GitHub to manage reviews of pull requests.

* If you have a trivial fix or improvement, go ahead and create a pull
  request, addressing (with `@...`) the maintainer of this repository (see
  [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request.

* If you plan to do something more involved, first discuss your ideas on
  [our mailing list]. This will avoid unnecessary work and surely give you
  and us a good deal of inspiration.

* Before your contributions can be landed, they must be signed off under the
  [Developer Certificate of Origin], which asserts you own and have the right
  to submit the change under the open source licence used by the project.

## Testing

Submitted changes should pass the current tests, and be covered by new test
cases when adding functionality.

* Run the tests locally using [tox], which executes the full suite on all
  supported Python versions installed.

* Each pull request is gated using [Travis CI], with the results linked on
  the GitHub page. This must pass before the change can land; note that
  pushing a new change will trigger a retest.

## Style

* Code style should follow [PEP 8] generally, and can be checked by running
  `tox -e flake8`.

* Import statements can be automatically formatted using [isort].

[our mailing list]: https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers
[Developer Certificate of Origin]: https://github.com/prometheus/prometheus/wiki/DCO-signing
[isort]: https://pypi.org/project/isort/
[PEP 8]: https://www.python.org/dev/peps/pep-0008/
[tox]: https://tox.readthedocs.io/en/latest/
[Travis CI]: https://docs.travis-ci.com/

python-prometheus-client-0.7.1/LICENSE

                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
python-prometheus-client-0.7.1/MAINTAINERS.md

* Brian Brazil

python-prometheus-client-0.7.1/NOTICE

Prometheus instrumentation library for Python applications
Copyright 2015 The Prometheus Authors

This product bundles decorator 4.0.10 which is available under a "2-clause BSD"
license. For details, see prometheus_client/decorator.py.

python-prometheus-client-0.7.1/README.md

# Prometheus Python Client

The official Python 2 and 3 client for [Prometheus](http://prometheus.io).

## Three Step Demo

**One**: Install the client:

```
pip install prometheus_client
```

**Two**: Paste the following into a Python interpreter:

```python
from prometheus_client import start_http_server, Summary
import random
import time

# Create a metric to track time spent and requests made.
REQUEST_TIME = Summary('request_processing_seconds', 'Time spent processing request')

# Decorate function with metric.
@REQUEST_TIME.time()
def process_request(t):
    """A dummy function that takes some time."""
    time.sleep(t)

if __name__ == '__main__':
    # Start up the server to expose the metrics.
    start_http_server(8000)
    # Generate some requests.
    while True:
        process_request(random.random())
```

**Three**: Visit [http://localhost:8000/](http://localhost:8000/) to view the metrics.

From one easy-to-use decorator you get:
* `request_processing_seconds_count`: Number of times this function was called.
* `request_processing_seconds_sum`: Total amount of time spent in this function.

Prometheus's `rate` function allows calculation of both requests per second,
and latency over time from this data.

In addition, if you're on Linux the `process` metrics expose CPU, memory and
other information about the process for free!

## Installation

```
pip install prometheus_client
```

This package can be found on
[PyPI](https://pypi.python.org/pypi/prometheus_client).

## Instrumenting

Four types of metric are offered: Counter, Gauge, Summary and Histogram.
See the documentation on [metric types](http://prometheus.io/docs/concepts/metric_types/)
and [instrumentation best practices](https://prometheus.io/docs/practices/instrumentation/#counter-vs-gauge-summary-vs-histogram)
on how to use them.

### Counter

Counters go up, and reset when the process restarts.

```python
from prometheus_client import Counter
c = Counter('my_failures', 'Description of counter')
c.inc()     # Increment by 1
c.inc(1.6)  # Increment by given value
```

If there is a suffix of `_total` on the metric name, it will be removed. When
exposing the time series for a counter, a `_total` suffix will be added. This
is for compatibility between OpenMetrics and the Prometheus text format, as
OpenMetrics requires the `_total` suffix.

There are utilities to count exceptions raised:

```python
@c.count_exceptions()
def f():
    pass

with c.count_exceptions():
    pass

# Count only one type of exception
with c.count_exceptions(ValueError):
    pass
```
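The exception helpers compose with ordinary code; here is a small sketch where
the metric name and the `parse_line` function are made up for illustration:

```python
from prometheus_client import Counter

PARSE_FAILURES = Counter('parse_failures', 'Lines that failed to parse')

def parse_line(line):
    # Only ValueError is counted; any other exception propagates uncounted.
    with PARSE_FAILURES.count_exceptions(ValueError):
        return float(line)
```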
### Gauge

Gauges can go up and down.

```python
from prometheus_client import Gauge
g = Gauge('my_inprogress_requests', 'Description of gauge')
g.inc()      # Increment by 1
g.dec(10)    # Decrement by given value
g.set(4.2)   # Set to a given value
```

There are utilities for common use cases:

```python
g.set_to_current_time()   # Set to current unixtime

# Increment when entered, decrement when exited.
@g.track_inprogress()
def f():
    pass

with g.track_inprogress():
    pass
```

A Gauge can also take its value from a callback:

```python
d = Gauge('data_objects', 'Number of objects')
my_dict = {}
d.set_function(lambda: len(my_dict))
```

### Summary

Summaries track the size and number of events.

```python
from prometheus_client import Summary
s = Summary('request_latency_seconds', 'Description of summary')
s.observe(4.7)  # Observe 4.7 (seconds in this case)
```

There are utilities for timing code:

```python
@s.time()
def f():
    pass

with s.time():
    pass
```

The Python client doesn't store or expose quantile information at this time.

### Histogram

Histograms track the size and number of events in buckets. This allows for
aggregatable calculation of quantiles.

```python
from prometheus_client import Histogram
h = Histogram('request_latency_seconds', 'Description of histogram')
h.observe(4.7)  # Observe 4.7 (seconds in this case)
```

The default buckets are intended to cover a typical web/rpc request from
milliseconds to seconds. They can be overridden by passing the `buckets`
keyword argument to `Histogram`.

There are utilities for timing code:

```python
@h.time()
def f():
    pass

with h.time():
    pass
```

### Info

Info tracks key-value information, usually about a whole target.

```python
from prometheus_client import Info
i = Info('my_build_version', 'Description of info')
i.info({'version': '1.2.3', 'buildhost': 'foo@bar'})
```

### Enum

Enum tracks which of a set of states something is currently in.

```python
from prometheus_client import Enum
e = Enum('my_task_state', 'Description of enum',
         states=['starting', 'running', 'stopped'])
e.state('running')
```

### Labels

All metrics can have labels, allowing grouping of related time series.

See the best practices on [naming](http://prometheus.io/docs/practices/naming/)
and [labels](http://prometheus.io/docs/practices/instrumentation/#use-labels).

Taking a counter as an example:

```python
from prometheus_client import Counter
c = Counter('my_requests_total', 'HTTP Failures', ['method', 'endpoint'])
c.labels('get', '/').inc()
c.labels('post', '/submit').inc()
```

Labels can also be passed as keyword arguments:

```python
from prometheus_client import Counter
c = Counter('my_requests_total', 'HTTP Failures', ['method', 'endpoint'])
c.labels(method='get', endpoint='/').inc()
c.labels(method='post', endpoint='/submit').inc()
```

### Process Collector

The Python client automatically exports metrics about process CPU usage, RAM,
file descriptors and start time. These all have the prefix `process`, and are
currently only available on Linux.

The `namespace` and `pid` constructor arguments allow for exporting metrics
about other processes, for example:

```
ProcessCollector(namespace='mydaemon', pid=lambda: open('/var/run/daemon.pid').read())
```

### Platform Collector

The client also automatically exports some metadata about Python. If using
Jython, metadata about the JVM in use is also included. This information is
available as labels on the `python_info` metric. The value of the metric is 1,
since it is the labels that carry information.

## Exporting

There are several options for exporting metrics.

### HTTP

Metrics are usually exposed over HTTP, to be read by the Prometheus server.

The easiest way to do this is via `start_http_server`, which will start an
HTTP server in a daemon thread on the given port:

```python
from prometheus_client import start_http_server

start_http_server(8000)
```

Visit [http://localhost:8000/](http://localhost:8000/) to view the metrics.
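If you'd rather check the endpoint from code than from a browser, a
standard-library fetch is enough. This is only a sanity-check sketch (Python 3
shown; the port matches the example above, and nothing here is part of the
client's API):

```python
from urllib.request import urlopen

# Scrape the freshly started server once and show the sample lines.
body = urlopen('http://localhost:8000/').read().decode('utf-8')
for line in body.splitlines():
    # Lines starting with '#' carry HELP/TYPE metadata; the rest are samples.
    if not line.startswith('#'):
        print(line)
```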
To add Prometheus exposition to an existing HTTP server, see the
`MetricsHandler` class, which provides a `BaseHTTPRequestHandler`. It also
serves as a simple example of how to write a custom endpoint.

#### Twisted

To use Prometheus with [twisted](https://twistedmatrix.com/), there is
`MetricsResource`, which exposes metrics as a twisted resource.

```python
from prometheus_client.twisted import MetricsResource
from twisted.web.server import Site
from twisted.web.resource import Resource
from twisted.internet import reactor

root = Resource()
root.putChild(b'metrics', MetricsResource())

factory = Site(root)
reactor.listenTCP(8000, factory)
reactor.run()
```

#### WSGI

To use Prometheus with [WSGI](http://wsgi.readthedocs.org/en/latest/), there
is `make_wsgi_app`, which creates a WSGI application.

```python
from prometheus_client import make_wsgi_app
from wsgiref.simple_server import make_server

app = make_wsgi_app()
httpd = make_server('', 8000, app)
httpd.serve_forever()
```

Such an application can be useful when integrating Prometheus metrics with
WSGI apps.

The method `start_wsgi_server` can be used to serve the metrics through the
WSGI reference implementation in a new thread.

```python
from prometheus_client import start_wsgi_server

start_wsgi_server(8000)
```

#### Flask

To use Prometheus with [Flask](http://flask.pocoo.org/) we need to serve
metrics through a Prometheus WSGI application. This can be achieved using
[Flask's application dispatching](http://flask.pocoo.org/docs/latest/patterns/appdispatch/).
Below is a working example.

Save the snippet below in a `myapp.py` file:

```python
from flask import Flask
from werkzeug.wsgi import DispatcherMiddleware
from prometheus_client import make_wsgi_app

# Create my app
app = Flask(__name__)

# Add prometheus wsgi middleware to route /metrics requests
app_dispatch = DispatcherMiddleware(app, {
    '/metrics': make_wsgi_app()
})
```

Run the example web application like this:

```bash
# Install uwsgi if you do not have it
pip install uwsgi

uwsgi --http 127.0.0.1:8000 --wsgi-file myapp.py --callable app_dispatch
```

Visit http://localhost:8000/metrics to see the metrics.

### Node exporter textfile collector

The [textfile collector](https://github.com/prometheus/node_exporter#textfile-collector)
allows machine-level statistics to be exported out via the Node exporter.

This is useful for monitoring cronjobs, or for writing cronjobs to expose
metrics about a machine system that the Node exporter does not support or
would not make sense to perform at every scrape (for example, anything
involving subprocesses).

```python
from prometheus_client import CollectorRegistry, Gauge, write_to_textfile

registry = CollectorRegistry()
g = Gauge('raid_status', '1 if raid array is okay', registry=registry)
g.set(1)
write_to_textfile('/configured/textfile/path/raid.prom', registry)
```

A separate registry is used, as the default registry may contain other metrics
such as those from the Process Collector.
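A common variant of this pattern is a cronjob that records when it last
succeeded and how long it took. A minimal sketch, where `run_job` is a
placeholder for the real work and the metric names are illustrative:

```python
from prometheus_client import CollectorRegistry, Gauge, write_to_textfile

registry = CollectorRegistry()
duration = Gauge('my_batchjob_duration_seconds',
                 'Time the batch job took to run', registry=registry)
last_success = Gauge('my_batchjob_last_success_unixtime',
                     'When the batch job last succeeded', registry=registry)

with duration.time():
    run_job()  # placeholder for the actual batch work
    last_success.set_to_current_time()

write_to_textfile('/configured/textfile/path/my_batchjob.prom', registry)
```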
## Exporting to a Pushgateway

The [Pushgateway](https://github.com/prometheus/pushgateway) allows ephemeral
and batch jobs to expose their metrics to Prometheus.

```python
from prometheus_client import CollectorRegistry, Gauge, push_to_gateway

registry = CollectorRegistry()
g = Gauge('job_last_success_unixtime', 'Last time a batch job successfully finished', registry=registry)
g.set_to_current_time()
push_to_gateway('localhost:9091', job='batchA', registry=registry)
```

A separate registry is used, as the default registry may contain other metrics
such as those from the Process Collector.

Pushgateway functions take a grouping key. `push_to_gateway` replaces metrics
with the same grouping key, `pushadd_to_gateway` only replaces metrics with
the same name and grouping key, and `delete_from_gateway` deletes metrics with
the given job and grouping key. See the
[Pushgateway documentation](https://github.com/prometheus/pushgateway/blob/master/README.md)
for more information.

`instance_ip_grouping_key` returns a grouping key with the instance label set
to the host's IP address.

### Handlers for authentication

If the push gateway you are connecting to is protected with HTTP Basic Auth,
you can use a special handler to set the Authorization header.

```python
from prometheus_client import CollectorRegistry, Gauge, push_to_gateway
from prometheus_client.exposition import basic_auth_handler

def my_auth_handler(url, method, timeout, headers, data):
    username = 'foobar'
    password = 'secret123'
    return basic_auth_handler(url, method, timeout, headers, data, username, password)

registry = CollectorRegistry()
g = Gauge('job_last_success_unixtime', 'Last time a batch job successfully finished', registry=registry)
g.set_to_current_time()
push_to_gateway('localhost:9091', job='batchA', registry=registry, handler=my_auth_handler)
```

## Bridges

It is also possible to expose metrics to systems other than Prometheus. This
allows you to take advantage of Prometheus instrumentation even if you are not
quite ready to fully transition to Prometheus yet.

### Graphite

Metrics are pushed over TCP in the Graphite plaintext format.

```python
from prometheus_client.bridge.graphite import GraphiteBridge

gb = GraphiteBridge(('graphite.your.org', 2003))
# Push once.
gb.push()
# Push every 10 seconds in a daemon thread.
gb.start(10.0)
```

## Custom Collectors

Sometimes it is not possible to directly instrument code, as it is not in your
control. This requires you to proxy metrics from other systems.

To do so you need to create a custom collector, for example:

```python
from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily, REGISTRY

class CustomCollector(object):
    def collect(self):
        yield GaugeMetricFamily('my_gauge', 'Help text', value=7)
        c = CounterMetricFamily('my_counter_total', 'Help text', labels=['foo'])
        c.add_metric(['bar'], 1.7)
        c.add_metric(['baz'], 3.8)
        yield c

REGISTRY.register(CustomCollector())
```

`SummaryMetricFamily` and `HistogramMetricFamily` work similarly.

A collector may implement a `describe` method which returns metrics in the
same format as `collect` (though you don't have to include the samples). This
is used to predetermine the names of time series a `CollectorRegistry`
exposes, and thus to detect collisions and duplicate registrations.

Usually custom collectors do not have to implement `describe`. If `describe`
is not implemented and the `CollectorRegistry` was created with
`auto_describe=True` (which is the case for the default registry) then
`collect` will be called at registration time instead of `describe`. If this
could cause problems, either implement a proper `describe`, or if that's not
practical have `describe` return an empty list.
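For instance, a collector that knows its metric names up front might implement
`describe` like this; a minimal sketch where the collector and metric names
are made up:

```python
from prometheus_client.core import GaugeMetricFamily, REGISTRY

class QueueCollector(object):
    """Hypothetical collector proxying a queue depth from another system."""

    def collect(self):
        # Real code would fetch the current value from the external system.
        yield GaugeMetricFamily('my_queue_depth', 'Items waiting in the queue', value=42)

    def describe(self):
        # Same metadata as collect(), but with no samples and no collection
        # work, so registration stays cheap while collision detection works.
        yield GaugeMetricFamily('my_queue_depth', 'Items waiting in the queue')

REGISTRY.register(QueueCollector())
```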
## Multiprocess Mode (Gunicorn)

Prometheus client libraries presume a threaded model, where metrics are shared
across workers. This doesn't work so well for languages such as Python, where
it's common to have processes rather than threads to handle large workloads.

To handle this the client library can be put in multiprocess mode. This comes
with a number of limitations:

- Registries can not be used as normal; all instantiated metrics are exported
- Custom collectors do not work (e.g. cpu and memory metrics)
- Info and Enum metrics do not work
- The pushgateway cannot be used
- Gauges cannot use the `pid` label

There are several steps to getting this working:

**One**: Gunicorn deployment

The `prometheus_multiproc_dir` environment variable must be set to a directory
that the client library can use for metrics. This directory must be wiped
between Gunicorn runs (before startup is recommended).

Put the following in the config file:

```python
from prometheus_client import multiprocess

def child_exit(server, worker):
    multiprocess.mark_process_dead(worker.pid)
```

**Two**: Inside the application

```python
from prometheus_client import multiprocess
from prometheus_client import generate_latest, CollectorRegistry, CONTENT_TYPE_LATEST, Gauge

# Example gauge.
IN_PROGRESS = Gauge("inprogress_requests", "help", multiprocess_mode='livesum')

# Expose metrics.
@IN_PROGRESS.track_inprogress()
def app(environ, start_response):
    registry = CollectorRegistry()
    multiprocess.MultiProcessCollector(registry)
    data = generate_latest(registry)
    status = '200 OK'
    response_headers = [
        ('Content-type', CONTENT_TYPE_LATEST),
        ('Content-Length', str(len(data)))
    ]
    start_response(status, response_headers)
    return iter([data])
```

**Three**: Instrumentation

Counters, Summaries and Histograms work as normal.

Gauges have several modes they can run in, which can be selected with the
`multiprocess_mode` parameter.

- 'all': Default. Return a timeseries per process, alive or dead.
- 'liveall': Return a timeseries per process that is still alive.
- 'livesum': Return a single timeseries that is the sum of the values of alive processes.
- 'max': Return a single timeseries that is the maximum of the values of all processes, alive or dead.
- 'min': Return a single timeseries that is the minimum of the values of all processes, alive or dead.

## Parser

The Python client supports parsing the Prometheus text format. This is
intended for advanced use cases where you have servers exposing Prometheus
metrics and need to get them into some other system.

```python
from prometheus_client.parser import text_string_to_metric_families

for family in text_string_to_metric_families(u"my_gauge 1.0\n"):
    for sample in family.samples:
        print("Name: {0} Labels: {1} Value: {2}".format(*sample))
```
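The same loop works for labelled and multi-sample families; a small sketch
with made-up input text:

```python
from prometheus_client.parser import text_string_to_metric_families

# Hypothetical scrape output with one labelled metric.
text = u'temperature_celsius{room="kitchen"} 21.5\n' \
       u'temperature_celsius{room="cellar"} 12.0\n'

for family in text_string_to_metric_families(text):
    for sample in family.samples:
        # Each sample carries its name, a dict of labels, and the value.
        print(sample.name, sample.labels, sample.value)
```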
python-prometheus-client-0.7.1/prometheus_client/__init__.py

#!/usr/bin/python

from . import exposition
from . import gc_collector
from . import metrics
from . import metrics_core
from . import platform_collector
from . import process_collector
from . import registry

__all__ = ['Counter', 'Gauge', 'Summary', 'Histogram', 'Info', 'Enum']

CollectorRegistry = registry.CollectorRegistry
REGISTRY = registry.REGISTRY

Metric = metrics_core.Metric

Counter = metrics.Counter
Gauge = metrics.Gauge
Summary = metrics.Summary
Histogram = metrics.Histogram
Info = metrics.Info
Enum = metrics.Enum

CONTENT_TYPE_LATEST = exposition.CONTENT_TYPE_LATEST
generate_latest = exposition.generate_latest
MetricsHandler = exposition.MetricsHandler
make_wsgi_app = exposition.make_wsgi_app
start_http_server = exposition.start_http_server
start_wsgi_server = exposition.start_wsgi_server
write_to_textfile = exposition.write_to_textfile
push_to_gateway = exposition.push_to_gateway
pushadd_to_gateway = exposition.pushadd_to_gateway
delete_from_gateway = exposition.delete_from_gateway
instance_ip_grouping_key = exposition.instance_ip_grouping_key

ProcessCollector = process_collector.ProcessCollector
PROCESS_COLLECTOR = process_collector.PROCESS_COLLECTOR

PlatformCollector = platform_collector.PlatformCollector
PLATFORM_COLLECTOR = platform_collector.PLATFORM_COLLECTOR

GCCollector = gc_collector.GCCollector
GC_COLLECTOR = gc_collector.GC_COLLECTOR

if __name__ == '__main__':
    c = Counter('cc', 'A counter')
    c.inc()

    g = Gauge('gg', 'A gauge')
    g.set(17)

    s = Summary('ss', 'A summary', ['a', 'b'])
    s.labels('c', 'd').observe(17)

    h = Histogram('hh', 'A histogram')
    h.observe(.6)

    start_http_server(8000)
    import time
    while True:
        time.sleep(1)

python-prometheus-client-0.7.1/prometheus_client/bridge/__init__.py

python-prometheus-client-0.7.1/prometheus_client/bridge/graphite.py

#!/usr/bin/python
from __future__ import unicode_literals

import logging
import re
import socket
import threading
import time
from timeit import default_timer

from ..registry import REGISTRY

# Roughly, have to keep to what works as a file name.
# We also remove periods, so labels can be distinguished.
_INVALID_GRAPHITE_CHARS = re.compile(r"[^a-zA-Z0-9_-]")


def _sanitize(s):
    return _INVALID_GRAPHITE_CHARS.sub('_', s)


class _RegularPush(threading.Thread):
    def __init__(self, pusher, interval, prefix):
        super(_RegularPush, self).__init__()
        self._pusher = pusher
        self._interval = interval
        self._prefix = prefix

    def run(self):
        wait_until = default_timer()
        while True:
            while True:
                now = default_timer()
                if now >= wait_until:
                    # May need to skip some pushes.
                    while wait_until < now:
                        wait_until += self._interval
                    break
                # time.sleep can return early.
                time.sleep(wait_until - now)
            try:
                self._pusher.push(prefix=self._prefix)
            except IOError:
                logging.exception("Push failed")


class GraphiteBridge(object):
    def __init__(self, address, registry=REGISTRY, timeout_seconds=30, _timer=time.time):
        self._address = address
        self._registry = registry
        self._timeout = timeout_seconds
        self._timer = _timer

    def push(self, prefix=''):
        now = int(self._timer())
        output = []

        prefixstr = ''
        if prefix:
            prefixstr = prefix + '.'

        for metric in self._registry.collect():
            for s in metric.samples:
                if s.labels:
                    labelstr = '.'
+ '.'.join( ['{0}.{1}'.format( _sanitize(k), _sanitize(v)) for k, v in sorted(s.labels.items())]) else: labelstr = '' output.append('{0}{1}{2} {3} {4}\n'.format( prefixstr, _sanitize(s.name), labelstr, float(s.value), now)) conn = socket.create_connection(self._address, self._timeout) conn.sendall(''.join(output).encode('ascii')) conn.close() def start(self, interval=60.0, prefix=''): t = _RegularPush(self, interval, prefix) t.daemon = True t.start() python-prometheus-client-0.7.1/prometheus_client/context_managers.py000066400000000000000000000032331350270547000261550ustar00rootroot00000000000000from __future__ import unicode_literals from timeit import default_timer from .decorator import decorate class ExceptionCounter(object): def __init__(self, counter, exception): self._counter = counter self._exception = exception def __enter__(self): pass def __exit__(self, typ, value, traceback): if isinstance(value, self._exception): self._counter.inc() def __call__(self, f): def wrapped(func, *args, **kwargs): with self: return func(*args, **kwargs) return decorate(f, wrapped) class InprogressTracker(object): def __init__(self, gauge): self._gauge = gauge def __enter__(self): self._gauge.inc() def __exit__(self, typ, value, traceback): self._gauge.dec() def __call__(self, f): def wrapped(func, *args, **kwargs): with self: return func(*args, **kwargs) return decorate(f, wrapped) class Timer(object): def __init__(self, callback): self._callback = callback def _new_timer(self): return self.__class__(self._callback) def __enter__(self): self._start = default_timer() def __exit__(self, typ, value, traceback): # Time can go backwards. duration = max(default_timer() - self._start, 0) self._callback(duration) def __call__(self, f): def wrapped(func, *args, **kwargs): # Obtaining new instance of timer every time # ensures thread safety and reentrancy. with self._new_timer(): return func(*args, **kwargs) return decorate(f, wrapped) python-prometheus-client-0.7.1/prometheus_client/core.py000066400000000000000000000015771350270547000235550ustar00rootroot00000000000000from __future__ import unicode_literals from .metrics import Counter, Enum, Gauge, Histogram, Info, Summary from .metrics_core import ( CounterMetricFamily, GaugeHistogramMetricFamily, GaugeMetricFamily, HistogramMetricFamily, InfoMetricFamily, Metric, StateSetMetricFamily, SummaryMetricFamily, UnknownMetricFamily, UntypedMetricFamily) from .registry import CollectorRegistry, REGISTRY from .samples import Exemplar, Sample, Timestamp __all__ = ( 'CollectorRegistry', 'Counter', 'CounterMetricFamily', 'Enum', 'Exemplar', 'Gauge', 'GaugeHistogramMetricFamily', 'GaugeMetricFamily', 'Histogram', 'HistogramMetricFamily', 'Info', 'InfoMetricFamily', 'Metric', 'REGISTRY', 'Sample', 'StateSetMetricFamily', 'Summary', 'SummaryMetricFamily', 'Timestamp', 'UnknownMetricFamily', 'UntypedMetricFamily', ) python-prometheus-client-0.7.1/prometheus_client/decorator.py000066400000000000000000000366721350270547000246130ustar00rootroot00000000000000# ######################### LICENSE ############################ # # Copyright (c) 2005-2016, Michele Simionato # All rights reserved. # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. 
# Redistributions in bytecode form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS # OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR # TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH # DAMAGE. """ Decorator module, see http://pypi.python.org/pypi/decorator for the documentation. """ from __future__ import print_function import collections import inspect import itertools import operator import re import sys __version__ = '4.0.10' if sys.version_info >= (3,): from inspect import getfullargspec def get_init(cls): return cls.__init__ else: class getfullargspec(object): "A quick and dirty replacement for getfullargspec for Python 2.X" def __init__(self, f): self.args, self.varargs, self.varkw, self.defaults = \ inspect.getargspec(f) self.kwonlyargs = [] self.kwonlydefaults = None def __iter__(self): yield self.args yield self.varargs yield self.varkw yield self.defaults getargspec = inspect.getargspec def get_init(cls): return cls.__init__.__func__ # getargspec has been deprecated in Python 3.5 ArgSpec = collections.namedtuple( 'ArgSpec', 'args varargs varkw defaults') def getargspec(f): """A replacement for inspect.getargspec""" spec = getfullargspec(f) return ArgSpec(spec.args, spec.varargs, spec.varkw, spec.defaults) DEF = re.compile(r'\s*def\s*([_\w][_\w\d]*)\s*\(') # basic functionality class FunctionMaker(object): """ An object with the ability to create functions with a given signature. It has attributes name, doc, module, signature, defaults, dict and methods update and make. 
""" # Atomic get-and-increment provided by the GIL _compile_count = itertools.count() def __init__(self, func=None, name=None, signature=None, defaults=None, doc=None, module=None, funcdict=None): self.shortsignature = signature if func: # func can be a class or a callable, but not an instance method self.name = func.__name__ if self.name == '': # small hack for lambda functions self.name = '_lambda_' self.doc = func.__doc__ self.module = func.__module__ if inspect.isfunction(func): argspec = getfullargspec(func) self.annotations = getattr(func, '__annotations__', {}) for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', 'kwonlydefaults'): setattr(self, a, getattr(argspec, a)) for i, arg in enumerate(self.args): setattr(self, 'arg%d' % i, arg) if sys.version_info < (3,): # easy way self.shortsignature = self.signature = ( inspect.formatargspec( formatvalue=lambda val: "", *argspec)[1:-1]) else: # Python 3 way allargs = list(self.args) allshortargs = list(self.args) if self.varargs: allargs.append('*' + self.varargs) allshortargs.append('*' + self.varargs) elif self.kwonlyargs: allargs.append('*') # single star syntax for a in self.kwonlyargs: allargs.append('%s=None' % a) allshortargs.append('%s=%s' % (a, a)) if self.varkw: allargs.append('**' + self.varkw) allshortargs.append('**' + self.varkw) self.signature = ', '.join(allargs) self.shortsignature = ', '.join(allshortargs) self.dict = func.__dict__.copy() # func=None happens when decorating a caller if name: self.name = name if signature is not None: self.signature = signature if defaults: self.defaults = defaults if doc: self.doc = doc if module: self.module = module if funcdict: self.dict = funcdict # check existence required attributes assert hasattr(self, 'name') if not hasattr(self, 'signature'): raise TypeError('You are decorating a non function: %s' % func) def update(self, func, **kw): "Update the signature of func with the data in self" func.__name__ = self.name func.__doc__ = getattr(self, 'doc', None) func.__dict__ = getattr(self, 'dict', {}) func.__defaults__ = getattr(self, 'defaults', ()) func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None) func.__annotations__ = getattr(self, 'annotations', None) try: frame = sys._getframe(3) except AttributeError: # for IronPython and similar implementations callermodule = '?' else: callermodule = frame.f_globals.get('__name__', '?') func.__module__ = getattr(self, 'module', callermodule) func.__dict__.update(kw) def make(self, src_templ, evaldict=None, addsource=False, **attrs): "Make a new function from a given template and update the signature" src = src_templ % vars(self) # expand name and signature evaldict = evaldict or {} mo = DEF.match(src) if mo is None: raise SyntaxError('not a valid function template\n%s' % src) name = mo.group(1) # extract the function name names = set([name] + [arg.strip(' *') for arg in self.shortsignature.split(',')]) for n in names: if n in ('_func_', '_call_'): raise NameError('%s is overridden in\n%s' % (n, src)) if not src.endswith('\n'): # add a newline for old Pythons src += '\n' # Ensure each generated function has a unique filename for profilers # (such as cProfile) that depend on the tuple of (, # , ) being unique. 
filename = '' % (next(self._compile_count),) try: code = compile(src, filename, 'single') exec(code, evaldict) except: print('Error in generated code:', file=sys.stderr) print(src, file=sys.stderr) raise func = evaldict[name] if addsource: attrs['__source__'] = src self.update(func, **attrs) return func @classmethod def create(cls, obj, body, evaldict, defaults=None, doc=None, module=None, addsource=True, **attrs): """ Create a function from the strings name, signature and body. evaldict is the evaluation dictionary. If addsource is true an attribute __source__ is added to the result. The attributes attrs are added, if any. """ if isinstance(obj, str): # "name(signature)" name, rest = obj.strip().split('(', 1) signature = rest[:-1] # strip a right parens func = None else: # a function name = None signature = None func = obj self = cls(func, name, signature, defaults, doc, module) ibody = '\n'.join(' ' + line for line in body.splitlines()) return self.make('def %(name)s(%(signature)s):\n' + ibody, evaldict, addsource, **attrs) def decorate(func, caller): """ decorate(func, caller) decorates a function using a caller. """ evaldict = dict(_call_=caller, _func_=func) fun = FunctionMaker.create( func, "return _call_(_func_, %(shortsignature)s)", evaldict, __wrapped__=func) if hasattr(func, '__qualname__'): fun.__qualname__ = func.__qualname__ return fun def decorator(caller, _func=None): """decorator(caller) converts a caller function into a decorator""" if _func is not None: # return a decorated function # this is obsolete behavior; you should use decorate instead return decorate(_func, caller) # else return a decorator function if inspect.isclass(caller): name = caller.__name__.lower() doc = 'decorator(%s) converts functions/generators into ' \ 'factories of %s objects' % (caller.__name__, caller.__name__) elif inspect.isfunction(caller): if caller.__name__ == '': name = '_lambda_' else: name = caller.__name__ doc = caller.__doc__ else: # assume caller is an object with a __call__ method name = caller.__class__.__name__.lower() doc = caller.__call__.__doc__ evaldict = dict(_call_=caller, _decorate_=decorate) return FunctionMaker.create( '%s(func)' % name, 'return _decorate_(func, _call_)', evaldict, doc=doc, module=caller.__module__, __wrapped__=caller) # ####################### contextmanager ####################### # try: # Python >= 3.2 from contextlib import _GeneratorContextManager except ImportError: # Python >= 2.5 from contextlib import GeneratorContextManager as _GeneratorContextManager class ContextManager(_GeneratorContextManager): def __call__(self, func): """Context manager decorator""" return FunctionMaker.create( func, "with _self_: return _func_(%(shortsignature)s)", dict(_self_=self, _func_=func), __wrapped__=func) init = getfullargspec(_GeneratorContextManager.__init__) n_args = len(init.args) if n_args == 2 and not init.varargs: # (self, genobj) Python 2.7 def __init__(self, g, *a, **k): return _GeneratorContextManager.__init__(self, g(*a, **k)) ContextManager.__init__ = __init__ elif n_args == 2 and init.varargs: # (self, gen, *a, **k) Python 3.4 pass elif n_args == 4: # (self, gen, args, kwds) Python 3.5 def __init__(self, g, *a, **k): return _GeneratorContextManager.__init__(self, g, a, k) ContextManager.__init__ = __init__ contextmanager = decorator(ContextManager) # ############################ dispatch_on ############################ # def append(a, vancestors): """ Append ``a`` to the list of the virtual ancestors, unless it is already included. 
""" add = True for j, va in enumerate(vancestors): if issubclass(va, a): add = False break if issubclass(a, va): vancestors[j] = a add = False if add: vancestors.append(a) # inspired from simplegeneric by P.J. Eby and functools.singledispatch def dispatch_on(*dispatch_args): """ Factory of decorators turning a function into a generic function dispatching on the given arguments. """ assert dispatch_args, 'No dispatch args passed' dispatch_str = '(%s,)' % ', '.join(dispatch_args) def check(arguments, wrong=operator.ne, msg=''): """Make sure one passes the expected number of arguments""" if wrong(len(arguments), len(dispatch_args)): raise TypeError('Expected %d arguments, got %d%s' % (len(dispatch_args), len(arguments), msg)) def gen_func_dec(func): """Decorator turning a function into a generic function""" # first check the dispatch arguments argset = set(getfullargspec(func).args) if not set(dispatch_args) <= argset: raise NameError('Unknown dispatch arguments %s' % dispatch_str) typemap = {} def vancestors(*types): """ Get a list of sets of virtual ancestors for the given types """ check(types) ras = [[] for _ in range(len(dispatch_args))] for types_ in typemap: for t, type_, ra in zip(types, types_, ras): if issubclass(t, type_) and type_ not in t.__mro__: append(type_, ra) return [set(ra) for ra in ras] def ancestors(*types): """ Get a list of virtual MROs, one for each type """ check(types) lists = [] for t, vas in zip(types, vancestors(*types)): n_vas = len(vas) if n_vas > 1: raise RuntimeError( 'Ambiguous dispatch for %s: %s' % (t, vas)) elif n_vas == 1: va, = vas mro = type('t', (t, va), {}).__mro__[1:] else: mro = t.__mro__ lists.append(mro[:-1]) # discard t and object return lists def register(*types): """ Decorator to register an implementation for the given types """ check(types) def dec(f): check(getfullargspec(f).args, operator.lt, ' in ' + f.__name__) typemap[types] = f return f return dec def dispatch_info(*types): """ An utility to introspect the dispatch algorithm """ check(types) lst = [] for anc in itertools.product(*ancestors(*types)): lst.append(tuple(a.__name__ for a in anc)) return lst def _dispatch(dispatch_args, *args, **kw): types = tuple(type(arg) for arg in dispatch_args) try: # fast path f = typemap[types] except KeyError: pass else: return f(*args, **kw) combinations = itertools.product(*ancestors(*types)) next(combinations) # the first one has been already tried for types_ in combinations: f = typemap.get(types_) if f is not None: return f(*args, **kw) # else call the default implementation return func(*args, **kw) return FunctionMaker.create( func, 'return _f_(%s, %%(shortsignature)s)' % dispatch_str, dict(_f_=_dispatch), register=register, default=func, typemap=typemap, vancestors=vancestors, ancestors=ancestors, dispatch_info=dispatch_info, __wrapped__=func) gen_func_dec.__name__ = 'dispatch_on' + dispatch_str return gen_func_dec python-prometheus-client-0.7.1/prometheus_client/exposition.py000066400000000000000000000346451350270547000250300ustar00rootroot00000000000000#!/usr/bin/python from __future__ import unicode_literals import base64 from contextlib import closing import os import socket import sys import threading from wsgiref.simple_server import make_server, WSGIRequestHandler from .openmetrics import exposition as openmetrics from .registry import REGISTRY from .utils import floatToGoString try: from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer from SocketServer import ThreadingMixIn from urllib2 import build_opener, Request, 
HTTPHandler from urllib import quote_plus from urlparse import parse_qs, urlparse except ImportError: # Python 3 from http.server import BaseHTTPRequestHandler, HTTPServer from socketserver import ThreadingMixIn from urllib.request import build_opener, Request, HTTPHandler from urllib.parse import quote_plus, parse_qs, urlparse CONTENT_TYPE_LATEST = str('text/plain; version=0.0.4; charset=utf-8') """Content type of the latest text format""" PYTHON26_OR_OLDER = sys.version_info < (2, 7) def make_wsgi_app(registry=REGISTRY): """Create a WSGI app which serves the metrics from a registry.""" def prometheus_app(environ, start_response): params = parse_qs(environ.get('QUERY_STRING', '')) r = registry encoder, content_type = choose_encoder(environ.get('HTTP_ACCEPT')) if 'name[]' in params: r = r.restricted_registry(params['name[]']) output = encoder(r) status = str('200 OK') headers = [(str('Content-type'), content_type)] start_response(status, headers) return [output] return prometheus_app class _SilentHandler(WSGIRequestHandler): """WSGI handler that does not log requests.""" def log_message(self, format, *args): """Log nothing.""" def start_wsgi_server(port, addr='', registry=REGISTRY): """Starts a WSGI server for prometheus metrics as a daemon thread.""" app = make_wsgi_app(registry) httpd = make_server(addr, port, app, handler_class=_SilentHandler) t = threading.Thread(target=httpd.serve_forever) t.daemon = True t.start() def generate_latest(registry=REGISTRY): """Returns the metrics from the registry in latest text format as a string.""" def sample_line(line): if line.labels: labelstr = '{{{0}}}'.format(','.join( ['{0}="{1}"'.format( k, v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"')) for k, v in sorted(line.labels.items())])) else: labelstr = '' timestamp = '' if line.timestamp is not None: # Convert to milliseconds. timestamp = ' {0:d}'.format(int(float(line.timestamp) * 1000)) return '{0}{1} {2}{3}\n'.format( line.name, labelstr, floatToGoString(line.value), timestamp) output = [] for metric in registry.collect(): try: mname = metric.name mtype = metric.type # Munging from OpenMetrics into Prometheus format. if mtype == 'counter': mname = mname + '_total' elif mtype == 'info': mname = mname + '_info' mtype = 'gauge' elif mtype == 'stateset': mtype = 'gauge' elif mtype == 'gaugehistogram': # A gauge histogram is really a gauge, # but this captures the strucutre better. mtype = 'histogram' elif mtype == 'unknown': mtype = 'untyped' output.append('# HELP {0} {1}\n'.format( mname, metric.documentation.replace('\\', r'\\').replace('\n', r'\n'))) output.append('# TYPE {0} {1}\n'.format(mname, mtype)) om_samples = {} for s in metric.samples: for suffix in ['_created', '_gsum', '_gcount']: if s.name == metric.name + suffix: # OpenMetrics specific sample, put in a gauge at the end. 
om_samples.setdefault(suffix, []).append(sample_line(s)) break else: output.append(sample_line(s)) except Exception as exception: exception.args = (exception.args or ('',)) + (metric,) raise for suffix, lines in sorted(om_samples.items()): output.append('# TYPE {0}{1} gauge\n'.format(metric.name, suffix)) output.extend(lines) return ''.join(output).encode('utf-8') def choose_encoder(accept_header): accept_header = accept_header or '' for accepted in accept_header.split(','): if accepted.split(';')[0].strip() == 'application/openmetrics-text': return (openmetrics.generate_latest, openmetrics.CONTENT_TYPE_LATEST) return generate_latest, CONTENT_TYPE_LATEST class MetricsHandler(BaseHTTPRequestHandler): """HTTP handler that gives metrics from ``REGISTRY``.""" registry = REGISTRY def do_GET(self): registry = self.registry params = parse_qs(urlparse(self.path).query) encoder, content_type = choose_encoder(self.headers.get('Accept')) if 'name[]' in params: registry = registry.restricted_registry(params['name[]']) try: output = encoder(registry) except: self.send_error(500, 'error generating metric output') raise self.send_response(200) self.send_header('Content-Type', content_type) self.end_headers() self.wfile.write(output) def log_message(self, format, *args): """Log nothing.""" @classmethod def factory(cls, registry): """Returns a dynamic MetricsHandler class tied to the passed registry. """ # This implementation relies on MetricsHandler.registry # (defined above and defaulted to REGISTRY). # As we have unicode_literals, we need to create a str() # object for type(). cls_name = str(cls.__name__) MyMetricsHandler = type(cls_name, (cls, object), {"registry": registry}) return MyMetricsHandler class _ThreadingSimpleServer(ThreadingMixIn, HTTPServer): """Thread per request HTTP server.""" # Make worker threads "fire and forget". Beginning with Python 3.7 this # prevents a memory leak because ``ThreadingMixIn`` starts to gather all # non-daemon threads in a list in order to join on them at server close. # Enabling daemon threads virtually makes ``_ThreadingSimpleServer`` the # same as Python 3.7's ``ThreadingHTTPServer``. daemon_threads = True def start_http_server(port, addr='', registry=REGISTRY): """Starts an HTTP server for prometheus metrics as a daemon thread""" CustomMetricsHandler = MetricsHandler.factory(registry) httpd = _ThreadingSimpleServer((addr, port), CustomMetricsHandler) t = threading.Thread(target=httpd.serve_forever) t.daemon = True t.start() def write_to_textfile(path, registry): """Write metrics to the given path. This is intended for use with the Node exporter textfile collector. The path must end in .prom for the textfile collector to process it.""" tmppath = '%s.%s.%s' % (path, os.getpid(), threading.current_thread().ident) with open(tmppath, 'wb') as f: f.write(generate_latest(registry)) # rename(2) is atomic. os.rename(tmppath, path) def default_handler(url, method, timeout, headers, data): """Default handler that implements HTTP/HTTPS connections. Used by the push_to_gateway functions. 
Can be re-used by other handlers.""" def handle(): request = Request(url, data=data) request.get_method = lambda: method for k, v in headers: request.add_header(k, v) resp = build_opener(HTTPHandler).open(request, timeout=timeout) if resp.code >= 400: raise IOError("error talking to pushgateway: {0} {1}".format( resp.code, resp.msg)) return handle def basic_auth_handler(url, method, timeout, headers, data, username=None, password=None): """Handler that implements HTTP/HTTPS connections with Basic Auth. Sets auth headers using supplied 'username' and 'password', if set. Used by the push_to_gateway functions. Can be re-used by other handlers.""" def handle(): """Handler that implements HTTP Basic Auth. """ if username is not None and password is not None: auth_value = '{0}:{1}'.format(username, password).encode('utf-8') auth_token = base64.b64encode(auth_value) auth_header = b'Basic ' + auth_token headers.append(['Authorization', auth_header]) default_handler(url, method, timeout, headers, data)() return handle def push_to_gateway( gateway, job, registry, grouping_key=None, timeout=30, handler=default_handler): """Push metrics to the given pushgateway. `gateway` the url for your push gateway. Either of the form 'http://pushgateway.local', or 'pushgateway.local'. Scheme defaults to 'http' if none is provided `job` is the job label to be attached to all pushed metrics `registry` is an instance of CollectorRegistry `grouping_key` please see the pushgateway documentation for details. Defaults to None `timeout` is how long push will attempt to connect before giving up. Defaults to 30s, can be set to None for no timeout. `handler` is an optional function which can be provided to perform requests to the 'gateway'. Defaults to None, in which case an http or https request will be carried out by a default handler. If not None, the argument must be a function which accepts the following arguments: url, method, timeout, headers, and content May be used to implement additional functionality not supported by the built-in default handler (such as SSL client certicates, and HTTP authentication mechanisms). 'url' is the URL for the request, the 'gateway' argument described earlier will form the basis of this URL. 'method' is the HTTP method which should be used when carrying out the request. 'timeout' requests not successfully completed after this many seconds should be aborted. If timeout is None, then the handler should not set a timeout. 'headers' is a list of ("header-name","header-value") tuples which must be passed to the pushgateway in the form of HTTP request headers. The function should raise an exception (e.g. IOError) on failure. 'content' is the data which should be used to form the HTTP Message Body. This overwrites all metrics with the same job and grouping_key. This uses the PUT HTTP method.""" _use_gateway('PUT', gateway, job, registry, grouping_key, timeout, handler) def pushadd_to_gateway( gateway, job, registry, grouping_key=None, timeout=30, handler=default_handler): """PushAdd metrics to the given pushgateway. `gateway` the url for your push gateway. Either of the form 'http://pushgateway.local', or 'pushgateway.local'. Scheme defaults to 'http' if none is provided `job` is the job label to be attached to all pushed metrics `registry` is an instance of CollectorRegistry `grouping_key` please see the pushgateway documentation for details. Defaults to None `timeout` is how long push will attempt to connect before giving up. Defaults to 30s, can be set to None for no timeout. 
`handler` is an optional function which can be provided to perform requests to the 'gateway'. Defaults to a handler that will carry out a plain http or https request. See the 'prometheus_client.push_to_gateway' documentation for implementation requirements. This replaces metrics with the same name, job and grouping_key. This uses the POST HTTP method.""" _use_gateway('POST', gateway, job, registry, grouping_key, timeout, handler) def delete_from_gateway( gateway, job, grouping_key=None, timeout=30, handler=default_handler): """Delete metrics from the given pushgateway. `gateway` is the url for your push gateway. Either of the form 'http://pushgateway.local', or 'pushgateway.local'. Scheme defaults to 'http' if none is provided `job` is the job label to be attached to all pushed metrics `grouping_key` please see the pushgateway documentation for details. Defaults to None `timeout` is how long delete will attempt to connect before giving up. Defaults to 30s, can be set to None for no timeout. `handler` is an optional function which can be provided to perform requests to the 'gateway'. Defaults to a handler that will carry out a plain http or https request. See the 'prometheus_client.push_to_gateway' documentation for implementation requirements. This deletes metrics with the given job and grouping_key. This uses the DELETE HTTP method.""" _use_gateway('DELETE', gateway, job, None, grouping_key, timeout, handler) def _use_gateway(method, gateway, job, registry, grouping_key, timeout, handler): gateway_url = urlparse(gateway) if not gateway_url.scheme or (PYTHON26_OR_OLDER and gateway_url.scheme not in ['http', 'https']): gateway = 'http://{0}'.format(gateway) url = '{0}/metrics/job/{1}'.format(gateway, quote_plus(job)) data = b'' if method != 'DELETE': data = generate_latest(registry) if grouping_key is None: grouping_key = {} url += ''.join( '/{0}/{1}'.format(quote_plus(str(k)), quote_plus(str(v))) for k, v in sorted(grouping_key.items())) handler( url=url, method=method, timeout=timeout, headers=[('Content-Type', CONTENT_TYPE_LATEST)], data=data, )() def instance_ip_grouping_key(): """Grouping key with instance set to the IP Address of this host.""" with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as s: s.connect(('localhost', 0)) return {'instance': s.getsockname()[0]} python-prometheus-client-0.7.1/prometheus_client/gc_collector.py000066400000000000000000000027051350270547000252560ustar00rootroot00000000000000#!/usr/bin/python from __future__ import unicode_literals import gc import platform from .metrics_core import CounterMetricFamily from .registry import REGISTRY class GCCollector(object): """Collector for Garbage collection statistics.""" def __init__(self, registry=REGISTRY): if not hasattr(gc, 'get_stats') or platform.python_implementation() != 'CPython': return registry.register(self) def collect(self): collected = CounterMetricFamily( 'python_gc_objects_collected', 'Objects collected during gc', labels=['generation'], ) uncollectable = CounterMetricFamily( 'python_gc_objects_uncollectable', 'Uncollectable objects found during GC', labels=['generation'], ) collections = CounterMetricFamily( 'python_gc_collections', 'Number of times this generation was collected', labels=['generation'], ) for generation, stat in enumerate(gc.get_stats()): generation = str(generation) collected.add_metric([generation], value=stat['collected']) uncollectable.add_metric([generation], value=stat['uncollectable']) 
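# Per the CPython docs, each gc.get_stats() entry is a dict with
# 'collections', 'collected' and 'uncollectable' keys; 'collections'
# counts how many times this generation was collected.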
collections.add_metric([generation], value=stat['collections']) return [collected, uncollectable, collections] GC_COLLECTOR = GCCollector() """Default GCCollector in default Registry REGISTRY.""" python-prometheus-client-0.7.1/prometheus_client/metrics.py000066400000000000000000000515071350270547000242710ustar00rootroot00000000000000import sys from threading import Lock import time import types from . import values # retain this import style for testability from .context_managers import ExceptionCounter, InprogressTracker, Timer from .metrics_core import ( Metric, METRIC_LABEL_NAME_RE, METRIC_NAME_RE, RESERVED_METRIC_LABEL_NAME_RE, ) from .registry import REGISTRY from .utils import floatToGoString, INF if sys.version_info > (3,): unicode = str create_bound_method = types.MethodType else: def create_bound_method(func, obj): return types.MethodType(func, obj, obj.__class__) def _build_full_name(metric_type, name, namespace, subsystem, unit): full_name = '' if namespace: full_name += namespace + '_' if subsystem: full_name += subsystem + '_' full_name += name if unit and not full_name.endswith("_" + unit): full_name += "_" + unit if unit and metric_type in ('info', 'stateset'): raise ValueError('Metric name is of a type that cannot have a unit: ' + full_name) if metric_type == 'counter' and full_name.endswith('_total'): full_name = full_name[:-6] # Munge to OpenMetrics. return full_name def _validate_labelnames(cls, labelnames): labelnames = tuple(labelnames) for l in labelnames: if not METRIC_LABEL_NAME_RE.match(l): raise ValueError('Invalid label metric name: ' + l) if RESERVED_METRIC_LABEL_NAME_RE.match(l): raise ValueError('Reserved label metric name: ' + l) if l in cls._reserved_labelnames: raise ValueError('Reserved label metric name: ' + l) return labelnames class MetricWrapperBase(object): _type = None _reserved_labelnames = () def _is_observable(self): # Whether this metric is observable, i.e. # * a metric without label names and values, or # * the child of a labelled metric. return not self._labelnames or (self._labelnames and self._labelvalues) def _is_parent(self): return self._labelnames and not self._labelvalues def _get_metric(self): return Metric(self._name, self._documentation, self._type, self._unit) def describe(self): return [self._get_metric()] def collect(self): metric = self._get_metric() for suffix, labels, value in self._samples(): metric.add_sample(self._name + suffix, labels, value) return [metric] def __init__(self, name, documentation, labelnames=(), namespace='', subsystem='', unit='', registry=REGISTRY, labelvalues=None, ): self._name = _build_full_name(self._type, name, namespace, subsystem, unit) self._labelnames = _validate_labelnames(self, labelnames) self._labelvalues = tuple(labelvalues or ()) self._kwargs = {} self._documentation = documentation self._unit = unit if not METRIC_NAME_RE.match(self._name): raise ValueError('Invalid metric name: ' + self._name) if self._is_parent(): # Prepare the fields needed for child metrics. self._lock = Lock() self._metrics = {} if self._is_observable(): self._metric_init() if not self._labelvalues: # Register the multi-wrapper parent metric, or if a label-less metric, the whole shebang. if registry: registry.register(self) def labels(self, *labelvalues, **labelkwargs): """Return the child for the given labelset. All metrics can have labels, allowing grouping of related time series. 
Taking a counter as an example: from prometheus_client import Counter c = Counter('my_requests_total', 'HTTP requests', ['method', 'endpoint']) c.labels('get', '/').inc() c.labels('post', '/submit').inc() Labels can also be provided as keyword arguments: from prometheus_client import Counter c = Counter('my_requests_total', 'HTTP requests', ['method', 'endpoint']) c.labels(method='get', endpoint='/').inc() c.labels(method='post', endpoint='/submit').inc() See the best practices on [naming](http://prometheus.io/docs/practices/naming/) and [labels](http://prometheus.io/docs/practices/instrumentation/#use-labels). """ if not self._labelnames: raise ValueError('No label names were set when constructing %s' % self) if self._labelvalues: raise ValueError('%s already has labels set (%s); cannot chain calls to .labels()' % ( self, dict(zip(self._labelnames, self._labelvalues)) )) if labelvalues and labelkwargs: raise ValueError("Can't pass both *args and **kwargs") if labelkwargs: if sorted(labelkwargs) != sorted(self._labelnames): raise ValueError('Incorrect label names') labelvalues = tuple(unicode(labelkwargs[l]) for l in self._labelnames) else: if len(labelvalues) != len(self._labelnames): raise ValueError('Incorrect label count') labelvalues = tuple(unicode(l) for l in labelvalues) with self._lock: if labelvalues not in self._metrics: self._metrics[labelvalues] = self.__class__( self._name, documentation=self._documentation, labelnames=self._labelnames, unit=self._unit, labelvalues=labelvalues, **self._kwargs ) return self._metrics[labelvalues] def remove(self, *labelvalues): """Remove the given labelset from the metric.""" if not self._labelnames: raise ValueError('No label names were set when constructing %s' % self) if len(labelvalues) != len(self._labelnames): raise ValueError('Incorrect label count (expected %d, got %s)' % (len(self._labelnames), labelvalues)) labelvalues = tuple(unicode(l) for l in labelvalues) with self._lock: del self._metrics[labelvalues] def _samples(self): if self._is_parent(): return self._multi_samples() else: return self._child_samples() def _multi_samples(self): with self._lock: metrics = self._metrics.copy() for labels, metric in metrics.items(): series_labels = list(zip(self._labelnames, labels)) for suffix, sample_labels, value in metric._samples(): yield (suffix, dict(series_labels + list(sample_labels.items())), value) def _child_samples(self): # pragma: no cover raise NotImplementedError('_child_samples() must be implemented by %r' % self) def _metric_init(self): # pragma: no cover """ Initialize the metric object as a child, i.e. when it has labels (if any) set. This is factored as a separate function to allow for deferred initialization. """ raise NotImplementedError('_metric_init() must be implemented by %r' % self) class Counter(MetricWrapperBase): """A Counter tracks counts of events or running totals. Example use cases for Counters: - Number of requests processed - Number of items that were inserted into a queue - Total amount of data that a system has processed Counters can only go up (and be reset when the process restarts). If your use case can go down, you should use a Gauge instead. 
An example for a Counter: from prometheus_client import Counter c = Counter('my_failures_total', 'Description of counter') c.inc() # Increment by 1 c.inc(1.6) # Increment by given value There are utilities to count exceptions raised: @c.count_exceptions() def f(): pass with c.count_exceptions(): pass # Count only one type of exception with c.count_exceptions(ValueError): pass """ _type = 'counter' def _metric_init(self): self._value = values.ValueClass(self._type, self._name, self._name + '_total', self._labelnames, self._labelvalues) self._created = time.time() def inc(self, amount=1): """Increment counter by the given amount.""" if amount < 0: raise ValueError('Counters can only be incremented by non-negative amounts.') self._value.inc(amount) def count_exceptions(self, exception=Exception): """Count exceptions in a block of code or function. Can be used as a function decorator or context manager. Increments the counter when an exception of the given type is raised up out of the code. """ return ExceptionCounter(self, exception) def _child_samples(self): return ( ('_total', {}, self._value.get()), ('_created', {}, self._created), ) class Gauge(MetricWrapperBase): """Gauge metric, to report instantaneous values. Examples of Gauges include: - Inprogress requests - Number of items in a queue - Free memory - Total memory - Temperature Gauges can go both up and down. from prometheus_client import Gauge g = Gauge('my_inprogress_requests', 'Description of gauge') g.inc() # Increment by 1 g.dec(10) # Decrement by given value g.set(4.2) # Set to a given value There are utilities for common use cases: g.set_to_current_time() # Set to current unixtime # Increment when entered, decrement when exited. @g.track_inprogress() def f(): pass with g.track_inprogress(): pass A Gauge can also take its value from a callback: d = Gauge('data_objects', 'Number of objects') my_dict = {} d.set_function(lambda: len(my_dict)) """ _type = 'gauge' _MULTIPROC_MODES = frozenset(('min', 'max', 'livesum', 'liveall', 'all')) def __init__(self, name, documentation, labelnames=(), namespace='', subsystem='', unit='', registry=REGISTRY, labelvalues=None, multiprocess_mode='all', ): self._multiprocess_mode = multiprocess_mode if multiprocess_mode not in self._MULTIPROC_MODES: raise ValueError('Invalid multiprocess mode: ' + multiprocess_mode) super(Gauge, self).__init__( name=name, documentation=documentation, labelnames=labelnames, namespace=namespace, subsystem=subsystem, unit=unit, registry=registry, labelvalues=labelvalues, ) self._kwargs['multiprocess_mode'] = self._multiprocess_mode def _metric_init(self): self._value = values.ValueClass( self._type, self._name, self._name, self._labelnames, self._labelvalues, multiprocess_mode=self._multiprocess_mode ) def inc(self, amount=1): """Increment gauge by the given amount.""" self._value.inc(amount) def dec(self, amount=1): """Decrement gauge by the given amount.""" self._value.inc(-amount) def set(self, value): """Set gauge to the given value.""" self._value.set(float(value)) def set_to_current_time(self): """Set gauge to the current unixtime.""" self.set(time.time()) def track_inprogress(self): """Track inprogress blocks of code or functions. Can be used as a function decorator or context manager. Increments the gauge when the code is entered, and decrements when it is exited. """ return InprogressTracker(self) def time(self): """Time a block of code or function, and set the duration in seconds. Can be used as a function decorator or context manager. 
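A sketch of use (process_batch is a hypothetical work function):

            g = Gauge('last_batch_duration_seconds', 'Duration of the last batch')
            with g.time():
                process_batch()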
""" return Timer(self.set) def set_function(self, f): """Call the provided function to return the Gauge value. The function must return a float, and may be called from multiple threads. All other methods of the Gauge become NOOPs. """ def samples(self): return (('', {}, float(f())),) self._child_samples = create_bound_method(samples, self) def _child_samples(self): return (('', {}, self._value.get()),) class Summary(MetricWrapperBase): """A Summary tracks the size and number of events. Example use cases for Summaries: - Response latency - Request size Example for a Summary: from prometheus_client import Summary s = Summary('request_size_bytes', 'Request size (bytes)') s.observe(512) # Observe 512 (bytes) Example for a Summary using time: from prometheus_client import Summary REQUEST_TIME = Summary('response_latency_seconds', 'Response latency (seconds)') @REQUEST_TIME.time() def create_response(request): '''A dummy function''' time.sleep(1) Example for using the same Summary object as a context manager: with REQUEST_TIME.time(): pass # Logic to be timed """ _type = 'summary' _reserved_labelnames = ['quantile'] def _metric_init(self): self._count = values.ValueClass(self._type, self._name, self._name + '_count', self._labelnames, self._labelvalues) self._sum = values.ValueClass(self._type, self._name, self._name + '_sum', self._labelnames, self._labelvalues) self._created = time.time() def observe(self, amount): """Observe the given amount.""" self._count.inc(1) self._sum.inc(amount) def time(self): """Time a block of code or function, and observe the duration in seconds. Can be used as a function decorator or context manager. """ return Timer(self.observe) def _child_samples(self): return ( ('_count', {}, self._count.get()), ('_sum', {}, self._sum.get()), ('_created', {}, self._created)) class Histogram(MetricWrapperBase): """A Histogram tracks the size and number of events in buckets. You can use Histograms for aggregatable calculation of quantiles. Example use cases: - Response latency - Request size Example for a Histogram: from prometheus_client import Histogram h = Histogram('request_size_bytes', 'Request size (bytes)') h.observe(512) # Observe 512 (bytes) Example for a Histogram using time: from prometheus_client import Histogram REQUEST_TIME = Histogram('response_latency_seconds', 'Response latency (seconds)') @REQUEST_TIME.time() def create_response(request): '''A dummy function''' time.sleep(1) Example of using the same Histogram object as a context manager: with REQUEST_TIME.time(): pass # Logic to be timed The default buckets are intended to cover a typical web/rpc request from milliseconds to seconds. They can be overridden by passing `buckets` keyword argument to `Histogram`. """ _type = 'histogram' _reserved_labelnames = ['le'] DEFAULT_BUCKETS = (.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, INF) def __init__(self, name, documentation, labelnames=(), namespace='', subsystem='', unit='', registry=REGISTRY, labelvalues=None, buckets=DEFAULT_BUCKETS, ): self._prepare_buckets(buckets) super(Histogram, self).__init__( name=name, documentation=documentation, labelnames=labelnames, namespace=namespace, subsystem=subsystem, unit=unit, registry=registry, labelvalues=labelvalues, ) self._kwargs['buckets'] = buckets def _prepare_buckets(self, buckets): buckets = [float(b) for b in buckets] if buckets != sorted(buckets): # This is probably an error on the part of the user, # so raise rather than sorting for them. 
raise ValueError('Buckets not in sorted order') if buckets and buckets[-1] != INF: buckets.append(INF) if len(buckets) < 2: raise ValueError('Must have at least two buckets') self._upper_bounds = buckets def _metric_init(self): self._buckets = [] self._created = time.time() bucket_labelnames = self._labelnames + ('le',) self._sum = values.ValueClass(self._type, self._name, self._name + '_sum', self._labelnames, self._labelvalues) for b in self._upper_bounds: self._buckets.append(values.ValueClass( self._type, self._name, self._name + '_bucket', bucket_labelnames, self._labelvalues + (floatToGoString(b),)) ) def observe(self, amount): """Observe the given amount.""" self._sum.inc(amount) for i, bound in enumerate(self._upper_bounds): if amount <= bound: self._buckets[i].inc(1) break def time(self): """Time a block of code or function, and observe the duration in seconds. Can be used as a function decorator or context manager. """ return Timer(self.observe) def _child_samples(self): samples = [] acc = 0 for i, bound in enumerate(self._upper_bounds): acc += self._buckets[i].get() samples.append(('_bucket', {'le': floatToGoString(bound)}, acc)) samples.append(('_count', {}, acc)) samples.append(('_sum', {}, self._sum.get())) samples.append(('_created', {}, self._created)) return tuple(samples) class Info(MetricWrapperBase): """Info metric, key-value pairs. Examples of Info include: - Build information - Version information - Potential target metadata Example usage: from prometheus_client import Info i = Info('my_build', 'Description of info') i.info({'version': '1.2.3', 'buildhost': 'foo@bar'}) Info metrics do not work in multiprocess mode. """ _type = 'info' def _metric_init(self): self._labelname_set = set(self._labelnames) self._lock = Lock() self._value = {} def info(self, val): """Set info metric.""" if self._labelname_set.intersection(val.keys()): raise ValueError('Overlapping labels for Info metric, metric: %s child: %s' % ( self._labelnames, val)) with self._lock: self._value = dict(val) def _child_samples(self): with self._lock: return (('_info', self._value, 1.0,),) class Enum(MetricWrapperBase): """Enum metric, which of a set of states is true. Example usage: from prometheus_client import Enum e = Enum('task_state', 'Description of enum', states=['starting', 'running', 'stopped']) e.state('running') The first listed state will be the default. Enum metrics do not work in multiprocess mode. 
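A labelled Enum works the same way through .labels(), a sketch ('worker' and 'worker-1' are illustrative):

        e = Enum('task_state', 'Description of enum', ['worker'],
                 states=['starting', 'running', 'stopped'])
        e.labels('worker-1').state('running')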
""" _type = 'stateset' def __init__(self, name, documentation, labelnames=(), namespace='', subsystem='', unit='', registry=REGISTRY, labelvalues=None, states=None, ): super(Enum, self).__init__( name=name, documentation=documentation, labelnames=labelnames, namespace=namespace, subsystem=subsystem, unit=unit, registry=registry, labelvalues=labelvalues, ) if name in labelnames: raise ValueError('Overlapping labels for Enum metric: %s' % (name,)) if not states: raise ValueError('No states provided for Enum metric: %s' % (name,)) self._kwargs['states'] = self._states = states def _metric_init(self): self._value = 0 self._lock = Lock() def state(self, state): """Set enum metric state.""" with self._lock: self._value = self._states.index(state) def _child_samples(self): with self._lock: return [ ('', {self._name: s}, 1 if i == self._value else 0,) for i, s in enumerate(self._states) ] python-prometheus-client-0.7.1/prometheus_client/metrics_core.py000066400000000000000000000267171350270547000253060ustar00rootroot00000000000000import re from .samples import Sample METRIC_TYPES = ( 'counter', 'gauge', 'summary', 'histogram', 'gaugehistogram', 'unknown', 'info', 'stateset', ) METRIC_NAME_RE = re.compile(r'^[a-zA-Z_:][a-zA-Z0-9_:]*$') METRIC_LABEL_NAME_RE = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*$') RESERVED_METRIC_LABEL_NAME_RE = re.compile(r'^__.*$') class Metric(object): """A single metric family and its samples. This is intended only for internal use by the instrumentation client. Custom collectors should use GaugeMetricFamily, CounterMetricFamily and SummaryMetricFamily instead. """ def __init__(self, name, documentation, typ, unit=''): if unit and not name.endswith("_" + unit): name += "_" + unit if not METRIC_NAME_RE.match(name): raise ValueError('Invalid metric name: ' + name) self.name = name self.documentation = documentation self.unit = unit if typ == 'untyped': typ = 'unknown' if typ not in METRIC_TYPES: raise ValueError('Invalid metric type: ' + typ) self.type = typ self.samples = [] def add_sample(self, name, labels, value, timestamp=None, exemplar=None): """Add a sample to the metric. Internal-only, do not use.""" self.samples.append(Sample(name, labels, value, timestamp, exemplar)) def __eq__(self, other): return (isinstance(other, Metric) and self.name == other.name and self.documentation == other.documentation and self.type == other.type and self.unit == other.unit and self.samples == other.samples) def __repr__(self): return "Metric(%s, %s, %s, %s, %s)" % ( self.name, self.documentation, self.type, self.unit, self.samples, ) class UnknownMetricFamily(Metric): """A single unknwon metric and its samples. For use by custom collectors. """ def __init__(self, name, documentation, value=None, labels=None, unit=''): Metric.__init__(self, name, documentation, 'unknown', unit) if labels is not None and value is not None: raise ValueError('Can only specify at most one of value and labels.') if labels is None: labels = [] self._labelnames = tuple(labels) if value is not None: self.add_metric([], value) def add_metric(self, labels, value, timestamp=None): """Add a metric to the metric family. Args: labels: A list of label values value: The value of the metric. """ self.samples.append(Sample(self.name, dict(zip(self._labelnames, labels)), value, timestamp)) # For backward compatibility. UntypedMetricFamily = UnknownMetricFamily class CounterMetricFamily(Metric): """A single counter and its samples. For use by custom collectors. 
""" def __init__(self, name, documentation, value=None, labels=None, created=None, unit=''): # Glue code for pre-OpenMetrics metrics. if name.endswith('_total'): name = name[:-6] Metric.__init__(self, name, documentation, 'counter', unit) if labels is not None and value is not None: raise ValueError('Can only specify at most one of value and labels.') if labels is None: labels = [] self._labelnames = tuple(labels) if value is not None: self.add_metric([], value, created) def add_metric(self, labels, value, created=None, timestamp=None): """Add a metric to the metric family. Args: labels: A list of label values value: The value of the metric created: Optional unix timestamp the child was created at. """ self.samples.append(Sample(self.name + '_total', dict(zip(self._labelnames, labels)), value, timestamp)) if created is not None: self.samples.append(Sample(self.name + '_created', dict(zip(self._labelnames, labels)), created, timestamp)) class GaugeMetricFamily(Metric): """A single gauge and its samples. For use by custom collectors. """ def __init__(self, name, documentation, value=None, labels=None, unit=''): Metric.__init__(self, name, documentation, 'gauge', unit) if labels is not None and value is not None: raise ValueError('Can only specify at most one of value and labels.') if labels is None: labels = [] self._labelnames = tuple(labels) if value is not None: self.add_metric([], value) def add_metric(self, labels, value, timestamp=None): """Add a metric to the metric family. Args: labels: A list of label values value: A float """ self.samples.append(Sample(self.name, dict(zip(self._labelnames, labels)), value, timestamp)) class SummaryMetricFamily(Metric): """A single summary and its samples. For use by custom collectors. """ def __init__(self, name, documentation, count_value=None, sum_value=None, labels=None, unit=''): Metric.__init__(self, name, documentation, 'summary', unit) if (sum_value is None) != (count_value is None): raise ValueError('count_value and sum_value must be provided together.') if labels is not None and count_value is not None: raise ValueError('Can only specify at most one of value and labels.') if labels is None: labels = [] self._labelnames = tuple(labels) if count_value is not None: self.add_metric([], count_value, sum_value) def add_metric(self, labels, count_value, sum_value, timestamp=None): """Add a metric to the metric family. Args: labels: A list of label values count_value: The count value of the metric. sum_value: The sum value of the metric. """ self.samples.append(Sample(self.name + '_count', dict(zip(self._labelnames, labels)), count_value, timestamp)) self.samples.append(Sample(self.name + '_sum', dict(zip(self._labelnames, labels)), sum_value, timestamp)) class HistogramMetricFamily(Metric): """A single histogram and its samples. For use by custom collectors. """ def __init__(self, name, documentation, buckets=None, sum_value=None, labels=None, unit=''): Metric.__init__(self, name, documentation, 'histogram', unit) if (sum_value is None) != (buckets is None): raise ValueError('buckets and sum_value must be provided together.') if labels is not None and buckets is not None: raise ValueError('Can only specify at most one of buckets and labels.') if labels is None: labels = [] self._labelnames = tuple(labels) if buckets is not None: self.add_metric([], buckets, sum_value) def add_metric(self, labels, buckets, sum_value, timestamp=None): """Add a metric to the metric family. Args: labels: A list of label values buckets: A list of lists. 
Each inner list can be a pair of bucket name and value, or a triple of bucket name, value, and exemplar. The buckets must be sorted, and +Inf present. sum_value: The sum value of the metric. """ for b in buckets: bucket, value = b[:2] exemplar = None if len(b) == 3: exemplar = b[2] self.samples.append(Sample( self.name + '_bucket', dict(list(zip(self._labelnames, labels)) + [('le', bucket)]), value, timestamp, exemplar, )) # +Inf is last and provides the count value. self.samples.extend([ Sample(self.name + '_count', dict(zip(self._labelnames, labels)), buckets[-1][1], timestamp), Sample(self.name + '_sum', dict(zip(self._labelnames, labels)), sum_value, timestamp), ]) class GaugeHistogramMetricFamily(Metric): """A single gauge histogram and its samples. For use by custom collectors. """ def __init__(self, name, documentation, buckets=None, gsum_value=None, labels=None, unit=''): Metric.__init__(self, name, documentation, 'gaugehistogram', unit) if labels is not None and buckets is not None: raise ValueError('Can only specify at most one of buckets and labels.') if labels is None: labels = [] self._labelnames = tuple(labels) if buckets is not None: self.add_metric([], buckets, gsum_value) def add_metric(self, labels, buckets, gsum_value, timestamp=None): """Add a metric to the metric family. Args: labels: A list of label values buckets: A list of pairs of bucket names and values. The buckets must be sorted, and +Inf present. gsum_value: The sum value of the metric. """ for bucket, value in buckets: self.samples.append(Sample( self.name + '_bucket', dict(list(zip(self._labelnames, labels)) + [('le', bucket)]), value, timestamp)) # +Inf is last and provides the count value. self.samples.extend([ Sample(self.name + '_gcount', dict(zip(self._labelnames, labels)), buckets[-1][1], timestamp), Sample(self.name + '_gsum', dict(zip(self._labelnames, labels)), gsum_value, timestamp), ]) class InfoMetricFamily(Metric): """A single info and its samples. For use by custom collectors. """ def __init__(self, name, documentation, value=None, labels=None): Metric.__init__(self, name, documentation, 'info') if labels is not None and value is not None: raise ValueError('Can only specify at most one of value and labels.') if labels is None: labels = [] self._labelnames = tuple(labels) if value is not None: self.add_metric([], value) def add_metric(self, labels, value, timestamp=None): """Add a metric to the metric family. Args: labels: A list of label values value: A dict of labels """ self.samples.append(Sample( self.name + '_info', dict(dict(zip(self._labelnames, labels)), **value), 1, timestamp, )) class StateSetMetricFamily(Metric): """A single stateset and its samples. For use by custom collectors. """ def __init__(self, name, documentation, value=None, labels=None): Metric.__init__(self, name, documentation, 'stateset') if labels is not None and value is not None: raise ValueError('Can only specify at most one of value and labels.') if labels is None: labels = [] self._labelnames = tuple(labels) if value is not None: self.add_metric([], value) def add_metric(self, labels, value, timestamp=None): """Add a metric to the metric family. 
Args: labels: A list of label values value: A dict of string state names to booleans """ labels = tuple(labels) for state, enabled in sorted(value.items()): v = (1 if enabled else 0) self.samples.append(Sample( self.name, dict(zip(self._labelnames + (self.name,), labels + (state,))), v, timestamp, )) python-prometheus-client-0.7.1/prometheus_client/mmap_dict.py000066400000000000000000000122101350270547000245460ustar00rootroot00000000000000import json import mmap import os import struct _INITIAL_MMAP_SIZE = 1 << 20 _pack_integer_func = struct.Struct(b'i').pack _pack_double_func = struct.Struct(b'd').pack _unpack_integer = struct.Struct(b'i').unpack_from _unpack_double = struct.Struct(b'd').unpack_from # struct.pack_into has atomicity issues because it will temporarily write 0 into # the mmap, resulting in false reads to 0 when experiencing a lot of writes. # Using direct assignment solves this issue. def _pack_double(data, pos, value): data[pos:pos + 8] = _pack_double_func(value) def _pack_integer(data, pos, value): data[pos:pos + 4] = _pack_integer_func(value) def _read_all_values(data, used=0): """Yield (key, value, pos). No locking is performed.""" if used <= 0: # If no valid `used` value is passed in, read it from the file. used = _unpack_integer(data, 0)[0] pos = 8 while pos < used: encoded_len = _unpack_integer(data, pos)[0] # check we are not reading beyond bounds if encoded_len + pos > used: raise RuntimeError('Read beyond file size detected, file is corrupted.') pos += 4 encoded_key = data[pos : pos + encoded_len] padded_len = encoded_len + (8 - (encoded_len + 4) % 8) pos += padded_len value = _unpack_double(data, pos)[0] yield encoded_key.decode('utf-8'), value, pos pos += 8 class MmapedDict(object): """A dict of doubles, backed by an mmapped file. The file starts with a 4 byte int, indicating how much of it is used. Then 4 bytes of padding. There's then a number of entries, consisting of a 4 byte int which is the size of the next field, a utf-8 encoded string key, padding to an 8 byte alignment, and then an 8 byte float which is the value. Not thread safe. """ def __init__(self, filename, read_mode=False): self._f = open(filename, 'rb' if read_mode else 'a+b') self._fname = filename capacity = os.fstat(self._f.fileno()).st_size if capacity == 0: self._f.truncate(_INITIAL_MMAP_SIZE) capacity = _INITIAL_MMAP_SIZE self._capacity = capacity self._m = mmap.mmap(self._f.fileno(), self._capacity, access=mmap.ACCESS_READ if read_mode else mmap.ACCESS_WRITE) self._positions = {} self._used = _unpack_integer(self._m, 0)[0] if self._used == 0: self._used = 8 _pack_integer(self._m, 0, self._used) else: if not read_mode: for key, _, pos in self._read_all_values(): self._positions[key] = pos @staticmethod def read_all_values_from_file(filename): with open(filename, 'rb') as infp: # Read the first block of data, including the first 4 bytes which tell us # how much of the file (which is preallocated to _INITIAL_MMAP_SIZE bytes) is occupied. data = infp.read(65535) used = _unpack_integer(data, 0)[0] if used > len(data): # Then read in the rest, if needed. data += infp.read(used - len(data)) return _read_all_values(data, used) def _init_value(self, key): """Initialize a value. Lock must be held by caller.""" encoded = key.encode('utf-8') # Pad to be 8-byte aligned. 
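# For example, a 4-byte key is padded to 12 bytes so that the record is
# 4 (length int) + 12 (padded key) = 16 bytes and the 8-byte double that
# follows stays 8-byte aligned.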
padded = encoded + (b' ' * (8 - (len(encoded) + 4) % 8)) value = struct.pack('i{0}sd'.format(len(padded)).encode(), len(encoded), padded, 0.0) while self._used + len(value) > self._capacity: self._capacity *= 2 self._f.truncate(self._capacity) self._m = mmap.mmap(self._f.fileno(), self._capacity) self._m[self._used:self._used + len(value)] = value # Update how much space we've used. self._used += len(value) _pack_integer(self._m, 0, self._used) self._positions[key] = self._used - 8 def _read_all_values(self): """Yield (key, value, pos). No locking is performed.""" return _read_all_values(data=self._m, used=self._used) def read_all_values(self): """Yield (key, value). No locking is performed.""" for k, v, _ in self._read_all_values(): yield k, v def read_value(self, key): if key not in self._positions: self._init_value(key) pos = self._positions[key] # We assume that reading from an 8 byte aligned value is atomic return _unpack_double(self._m, pos)[0] def write_value(self, key, value): if key not in self._positions: self._init_value(key) pos = self._positions[key] # We assume that writing to an 8 byte aligned value is atomic _pack_double(self._m, pos, value) def close(self): if self._f: self._m.close() self._m = None self._f.close() self._f = None def mmap_key(metric_name, name, labelnames, labelvalues): """Format a key for use in the mmap file.""" # ensure labels are in consistent order for identity labels = dict(zip(labelnames, labelvalues)) return json.dumps([metric_name, name, labels], sort_keys=True) python-prometheus-client-0.7.1/prometheus_client/multiprocess.py000066400000000000000000000145351350270547000253520ustar00rootroot00000000000000#!/usr/bin/python from __future__ import unicode_literals from collections import defaultdict import glob import json import os from .metrics_core import Metric from .mmap_dict import MmapedDict from .samples import Sample from .utils import floatToGoString try: # Python3 FileNotFoundError except NameError: # Python 2 FileNotFoundError = IOError MP_METRIC_HELP = 'Multiprocess metric' class MultiProcessCollector(object): """Collector for files for multi-process mode.""" def __init__(self, registry, path=None): if path is None: path = os.environ.get('prometheus_multiproc_dir') if not path or not os.path.isdir(path): raise ValueError('env prometheus_multiproc_dir is not set or not a directory') self._path = path if registry: registry.register(self) @staticmethod def merge(files, accumulate=True): """Merge metrics from given mmap files. By default, histograms are accumulated, as per prometheus wire format. But if writing the merged data back to mmap files, use accumulate=False to avoid compound accumulation. 
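A sketch (the .db paths are hypothetical):

            files = ['/tmp/prom/counter_1.db', '/tmp/prom/counter_2.db']
            for metric in MultiProcessCollector.merge(files, accumulate=False):
                print(metric.name, metric.samples)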
""" metrics = MultiProcessCollector._read_metrics(files) return MultiProcessCollector._accumulate_metrics(metrics, accumulate) @staticmethod def _read_metrics(files): metrics = {} key_cache = {} def _parse_key(key): val = key_cache.get(key) if not val: metric_name, name, labels = json.loads(key) labels_key = tuple(sorted(labels.items())) val = key_cache[key] = (metric_name, name, labels, labels_key) return val for f in files: parts = os.path.basename(f).split('_') typ = parts[0] try: file_values = MmapedDict.read_all_values_from_file(f) except FileNotFoundError: if typ == 'gauge' and parts[1] in ('liveall', 'livesum'): # Those files can disappear between the glob of collect # and now (via a mark_process_dead call) so don't fail if # the file is missing continue raise for key, value, pos in file_values: metric_name, name, labels, labels_key = _parse_key(key) metric = metrics.get(metric_name) if metric is None: metric = Metric(metric_name, MP_METRIC_HELP, typ) metrics[metric_name] = metric if typ == 'gauge': pid = parts[2][:-3] metric._multiprocess_mode = parts[1] metric.add_sample(name, labels_key + (('pid', pid),), value) else: # The duplicates and labels are fixed in the next for. metric.add_sample(name, labels_key, value) return metrics @staticmethod def _accumulate_metrics(metrics, accumulate): for metric in metrics.values(): samples = defaultdict(float) buckets = defaultdict(lambda: defaultdict(float)) samples_setdefault = samples.setdefault for s in metric.samples: name, labels, value, timestamp, exemplar = s if metric.type == 'gauge': without_pid_key = (name, tuple([l for l in labels if l[0] != 'pid'])) if metric._multiprocess_mode == 'min': current = samples_setdefault(without_pid_key, value) if value < current: samples[without_pid_key] = value elif metric._multiprocess_mode == 'max': current = samples_setdefault(without_pid_key, value) if value > current: samples[without_pid_key] = value elif metric._multiprocess_mode == 'livesum': samples[without_pid_key] += value else: # all/liveall samples[(name, labels)] = value elif metric.type == 'histogram': # A for loop with early exit is faster than a genexpr # or a listcomp that ends up building unnecessary things for l in labels: if l[0] == 'le': bucket_value = float(l[1]) # _bucket without_le = tuple(l for l in labels if l[0] != 'le') buckets[without_le][bucket_value] += value break else: # did not find the `le` key # _sum/_count samples[(name, labels)] += value else: # Counter and Summary. samples[(name, labels)] += value # Accumulate bucket values. if metric.type == 'histogram': for labels, values in buckets.items(): acc = 0.0 for bucket, value in sorted(values.items()): sample_key = ( metric.name + '_bucket', labels + (('le', floatToGoString(bucket)),), ) if accumulate: acc += value samples[sample_key] = acc else: samples[sample_key] = value if accumulate: samples[(metric.name + '_count', labels)] = acc # Convert to correct sample format. 
metric.samples = [Sample(name_, dict(labels), value) for (name_, labels), value in samples.items()] return metrics.values() def collect(self): files = glob.glob(os.path.join(self._path, '*.db')) return self.merge(files, accumulate=True) def mark_process_dead(pid, path=None): """Do bookkeeping for when one process dies in a multi-process setup.""" if path is None: path = os.environ.get('prometheus_multiproc_dir') for f in glob.glob(os.path.join(path, 'gauge_livesum_{0}.db'.format(pid))): os.remove(f) for f in glob.glob(os.path.join(path, 'gauge_liveall_{0}.db'.format(pid))): os.remove(f) python-prometheus-client-0.7.1/prometheus_client/openmetrics/000077500000000000000000000000001350270547000245715ustar00rootroot00000000000000python-prometheus-client-0.7.1/prometheus_client/openmetrics/__init__.py000066400000000000000000000000001350270547000266700ustar00rootroot00000000000000python-prometheus-client-0.7.1/prometheus_client/openmetrics/exposition.py000066400000000000000000000054541350270547000273540ustar00rootroot00000000000000#!/usr/bin/python from __future__ import unicode_literals from ..utils import floatToGoString CONTENT_TYPE_LATEST = str('application/openmetrics-text; version=0.0.1; charset=utf-8') """Content type of the latest OpenMetrics text format""" def generate_latest(registry): '''Returns the metrics from the registry in latest text format as a string.''' output = [] for metric in registry.collect(): try: mname = metric.name output.append('# HELP {0} {1}\n'.format( mname, metric.documentation.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"'))) output.append('# TYPE {0} {1}\n'.format(mname, metric.type)) if metric.unit: output.append('# UNIT {0} {1}\n'.format(mname, metric.unit)) for s in metric.samples: if s.labels: labelstr = '{{{0}}}'.format(','.join( ['{0}="{1}"'.format( k, v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"')) for k, v in sorted(s.labels.items())])) else: labelstr = '' if s.exemplar: if metric.type not in ('histogram', 'gaugehistogram') or not s.name.endswith('_bucket'): raise ValueError("Metric {0} has exemplars, but is not a histogram bucket".format(metric.name)) labels = '{{{0}}}'.format(','.join( ['{0}="{1}"'.format( k, v.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"')) for k, v in sorted(s.exemplar.labels.items())])) if s.exemplar.timestamp is not None: exemplarstr = ' # {0} {1} {2}'.format( labels, floatToGoString(s.exemplar.value), s.exemplar.timestamp, ) else: exemplarstr = ' # {0} {1}'.format( labels, floatToGoString(s.exemplar.value), ) else: exemplarstr = '' timestamp = '' if s.timestamp is not None: timestamp = ' {0}'.format(s.timestamp) output.append('{0}{1} {2}{3}{4}\n'.format( s.name, labelstr, floatToGoString(s.value), timestamp, exemplarstr, )) except Exception as exception: exception.args = (exception.args or ('',)) + (metric,) raise output.append('# EOF\n') return ''.join(output).encode('utf-8') python-prometheus-client-0.7.1/prometheus_client/openmetrics/parser.py000066400000000000000000000472341350270547000264510ustar00rootroot00000000000000#!/usr/bin/python from __future__ import unicode_literals import math import re from ..metrics_core import Metric, METRIC_LABEL_NAME_RE from ..samples import Exemplar, Sample, Timestamp from ..utils import floatToGoString try: import StringIO except ImportError: # Python 3 import io as StringIO def text_string_to_metric_families(text): """Parse Openmetrics text format from a unicode string. See text_fd_to_metric_families. 
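A sketch (note the trailing '# EOF' the format requires):

        text = '# TYPE requests counter\nrequests_total 42.0\n# EOF\n'
        for family in text_string_to_metric_families(text):
            print(family.name, family.type)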
""" for metric_family in text_fd_to_metric_families(StringIO.StringIO(text)): yield metric_family ESCAPE_SEQUENCES = { '\\\\': '\\', '\\n': '\n', '\\"': '"', } def _replace_escape_sequence(match): return ESCAPE_SEQUENCES[match.group(0)] ESCAPING_RE = re.compile(r'\\[\\n"]') def _replace_escaping(s): return ESCAPING_RE.sub(_replace_escape_sequence, s) def _unescape_help(text): result = [] slash = False for char in text: if slash: if char == '\\': result.append('\\') elif char == '"': result.append('"') elif char == 'n': result.append('\n') else: result.append('\\' + char) slash = False else: if char == '\\': slash = True else: result.append(char) if slash: result.append('\\') return ''.join(result) def _parse_value(value): value = ''.join(value) if value != value.strip(): raise ValueError("Invalid value: {0!r}".format(value)) try: return int(value) except ValueError: return float(value) def _parse_timestamp(timestamp): timestamp = ''.join(timestamp) if not timestamp: return None if timestamp != timestamp.strip(): raise ValueError("Invalid timestamp: {0!r}".format(timestamp)) try: # Simple int. return Timestamp(int(timestamp), 0) except ValueError: try: # aaaa.bbbb. Nanosecond resolution supported. parts = timestamp.split('.', 1) return Timestamp(int(parts[0]), int(parts[1][:9].ljust(9, "0"))) except ValueError: # Float. ts = float(timestamp) if math.isnan(ts) or math.isinf(ts): raise ValueError("Invalid timestamp: {0!r}".format(timestamp)) return ts def _is_character_escaped(s, charpos): num_bslashes = 0 while (charpos > num_bslashes and s[charpos - 1 - num_bslashes] == '\\'): num_bslashes += 1 return num_bslashes % 2 == 1 def _parse_labels_with_state_machine(text): # The { has already been parsed. state = 'startoflabelname' labelname = [] labelvalue = [] labels = {} labels_len = 0 for char in text: if state == 'startoflabelname': if char == '}': state = 'endoflabels' else: state = 'labelname' labelname.append(char) elif state == 'labelname': if char == '=': state = 'labelvaluequote' else: labelname.append(char) elif state == 'labelvaluequote': if char == '"': state = 'labelvalue' else: raise ValueError("Invalid line: " + text) elif state == 'labelvalue': if char == '\\': state = 'labelvalueslash' elif char == '"': if not METRIC_LABEL_NAME_RE.match(''.join(labelname)): raise ValueError("Invalid line: " + text) labels[''.join(labelname)] = ''.join(labelvalue) labelname = [] labelvalue = [] state = 'endoflabelvalue' else: labelvalue.append(char) elif state == 'endoflabelvalue': if char == ',': state = 'labelname' elif char == '}': state = 'endoflabels' else: raise ValueError("Invalid line: " + text) elif state == 'labelvalueslash': state = 'labelvalue' if char == '\\': labelvalue.append('\\') elif char == 'n': labelvalue.append('\n') elif char == '"': labelvalue.append('"') else: labelvalue.append('\\' + char) elif state == 'endoflabels': if char == ' ': break else: raise ValueError("Invalid line: " + text) labels_len += 1 return labels, labels_len def _parse_labels(text): labels = {} # Raise error if we don't have valid labels if text and "=" not in text: raise ValueError # Copy original labels sub_labels = text try: # Process one label at a time while sub_labels: # The label name is before the equal value_start = sub_labels.index("=") label_name = sub_labels[:value_start] sub_labels = sub_labels[value_start + 1:] # Check for missing quotes if not sub_labels or sub_labels[0] != '"': raise ValueError # The first quote is guaranteed to be after the equal value_substr = sub_labels[1:] # Check 
for extra commas if not label_name or label_name[0] == ',': raise ValueError if not value_substr or value_substr[-1] == ',': raise ValueError # Find the last unescaped quote i = 0 while i < len(value_substr): i = value_substr.index('"', i) if not _is_character_escaped(value_substr[:i], i): break i += 1 # The label value is in between the first and last quote quote_end = i + 1 label_value = sub_labels[1:quote_end] # Replace escaping if needed if "\\" in label_value: label_value = _replace_escaping(label_value) labels[label_name] = label_value # Remove the processed label from the sub-slice for next iteration sub_labels = sub_labels[quote_end + 1:] if sub_labels.startswith(","): next_comma = 1 else: next_comma = 0 sub_labels = sub_labels[next_comma:] # Check for missing commas if sub_labels and next_comma == 0: raise ValueError return labels except ValueError: raise ValueError("Invalid labels: " + text) def _parse_sample(text): # Detect the labels in the text label_start = text.find("{") if label_start == -1: # We don't have labels name_end = text.index(" ") name = text[:name_end] # Parse the remaining text after the name remaining_text = text[name_end + 1:] value, timestamp, exemplar = _parse_remaining_text(remaining_text) return Sample(name, {}, value, timestamp, exemplar) # The name is before the labels name = text[:label_start] separator = " # " if text.count(separator) == 0: # Line doesn't contain an exemplar # We can use `rindex` to find `label_end` label_end = text.rindex("}") label = text[label_start + 1:label_end] labels = _parse_labels(label) else: # Line potentially contains an exemplar # Fallback to parsing labels with a state machine labels, labels_len = _parse_labels_with_state_machine(text[label_start + 1:]) label_end = labels_len + len(name) # Parsing labels succeeded, continue parsing the remaining text remaining_text = text[label_end + 2:] value, timestamp, exemplar = _parse_remaining_text(remaining_text) return Sample(name, labels, value, timestamp, exemplar) def _parse_remaining_text(text): split_text = text.split(" ", 1) val = _parse_value(split_text[0]) if len(split_text) == 1: # We don't have timestamp or exemplar return val, None, None timestamp = [] exemplar_value = [] exemplar_timestamp = [] exemplar_labels = None state = 'timestamp' text = split_text[1] it = iter(text) for char in it: if state == 'timestamp': if char == '#' and not timestamp: state = 'exemplarspace' elif char == ' ': state = 'exemplarhash' else: timestamp.append(char) elif state == 'exemplarhash': if char == '#': state = 'exemplarspace' else: raise ValueError("Invalid line: " + text) elif state == 'exemplarspace': if char == ' ': state = 'exemplarstartoflabels' else: raise ValueError("Invalid line: " + text) elif state == 'exemplarstartoflabels': if char == '{': label_start, label_end = text.index("{"), text.rindex("}") exemplar_labels = _parse_labels(text[label_start + 1:label_end]) state = 'exemplarparsedlabels' else: raise ValueError("Invalid line: " + text) elif state == 'exemplarparsedlabels': if char == '}': state = 'exemplarvaluespace' elif state == 'exemplarvaluespace': if char == ' ': state = 'exemplarvalue' else: raise ValueError("Invalid line: " + text) elif state == 'exemplarvalue': if char == ' ' and not exemplar_value: raise ValueError("Invalid line: " + text) elif char == ' ': state = 'exemplartimestamp' else: exemplar_value.append(char) elif state == 'exemplartimestamp': exemplar_timestamp.append(char) # Trailing space after value. 
if state == 'timestamp' and not timestamp: raise ValueError("Invalid line: " + text) # Trailing space after value. if state == 'exemplartimestamp' and not exemplar_timestamp: raise ValueError("Invalid line: " + text) # Incomplete exemplar. if state in ['exemplarhash', 'exemplarspace', 'exemplarstartoflabels', 'exemplarparsedlabels']: raise ValueError("Invalid line: " + text) ts = _parse_timestamp(timestamp) exemplar = None if exemplar_labels is not None: exemplar_length = sum([len(k) + len(v) for k, v in exemplar_labels.items()]) if exemplar_length > 64: raise ValueError("Exemplar labels are too long: " + text) exemplar = Exemplar( exemplar_labels, _parse_value(exemplar_value), _parse_timestamp(exemplar_timestamp), ) return val, ts, exemplar def _group_for_sample(sample, name, typ): if typ == 'info': # We can't distinguish between groups for info metrics. return {} if typ == 'summary' and sample.name == name: d = sample.labels.copy() del d['quantile'] return d if typ == 'stateset': d = sample.labels.copy() del d[name] return d if typ in ['histogram', 'gaugehistogram'] and sample.name == name + '_bucket': d = sample.labels.copy() del d['le'] return d return sample.labels def _check_histogram(samples, name): group = None timestamp = None def do_checks(): if bucket != float('+Inf'): raise ValueError("+Inf bucket missing: " + name) if count is not None and value != count: raise ValueError("Count does not match +Inf value: " + name) for s in samples: suffix = s.name[len(name):] g = _group_for_sample(s, name, 'histogram') if g != group or s.timestamp != timestamp: if group is not None: do_checks() count = None bucket = -1 value = 0 group = g timestamp = s.timestamp if suffix == '_bucket': b = float(s.labels['le']) if b <= bucket: raise ValueError("Buckets out of order: " + name) if s.value < value: raise ValueError("Bucket values out of order: " + name) bucket = b value = s.value elif suffix in ['_count', '_gcount']: count = s.value if group is not None: do_checks() def text_fd_to_metric_families(fd): """Parse OpenMetrics text format from a file descriptor. This is a laxer parser than the main Go parser, so successful parsing does not imply that the parsed text meets the specification. Yields Metric objects. 
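A sketch of reading from a file (the path is hypothetical):

        with open('metrics.txt') as fd:
            for family in text_fd_to_metric_families(fd):
                for sample in family.samples:
                    print(sample.name, sample.labels, sample.value)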
""" name = None allowed_names = [] eof = False seen_metrics = set() def build_metric(name, documentation, typ, unit, samples): if name in seen_metrics: raise ValueError("Duplicate metric: " + name) seen_metrics.add(name) if typ is None: typ = 'unknown' if documentation is None: documentation = '' if unit is None: unit = '' if unit and not name.endswith("_" + unit): raise ValueError("Unit does not match metric name: " + name) if unit and typ in ['info', 'stateset']: raise ValueError("Units not allowed for this metric type: " + name) if typ in ['histogram', 'gaugehistogram']: _check_histogram(samples, name) metric = Metric(name, documentation, typ, unit) # TODO: check labelvalues are valid utf8 metric.samples = samples return metric for line in fd: if line[-1] == '\n': line = line[:-1] if eof: raise ValueError("Received line after # EOF: " + line) if line == '# EOF': eof = True elif line.startswith('#'): parts = line.split(' ', 3) if len(parts) < 4: raise ValueError("Invalid line: " + line) if parts[2] == name and samples: raise ValueError("Received metadata after samples: " + line) if parts[2] != name: if name is not None: yield build_metric(name, documentation, typ, unit, samples) # New metric name = parts[2] unit = None typ = None documentation = None group = None seen_groups = set() group_timestamp = None group_timestamp_samples = set() samples = [] allowed_names = [parts[2]] if parts[1] == 'HELP': if documentation is not None: raise ValueError("More than one HELP for metric: " + line) if len(parts) == 4: documentation = _unescape_help(parts[3]) elif len(parts) == 3: raise ValueError("Invalid line: " + line) elif parts[1] == 'TYPE': if typ is not None: raise ValueError("More than one TYPE for metric: " + line) typ = parts[3] if typ == 'untyped': raise ValueError("Invalid TYPE for metric: " + line) allowed_names = { 'counter': ['_total', '_created'], 'summary': ['_count', '_sum', '', '_created'], 'histogram': ['_count', '_sum', '_bucket', '_created'], 'gaugehistogram': ['_gcount', '_gsum', '_bucket'], 'info': ['_info'], }.get(typ, ['']) allowed_names = [name + n for n in allowed_names] elif parts[1] == 'UNIT': if unit is not None: raise ValueError("More than one UNIT for metric: " + line) unit = parts[3] else: raise ValueError("Invalid line: " + line) else: sample = _parse_sample(line) if sample.name not in allowed_names: if name is not None: yield build_metric(name, documentation, typ, unit, samples) # Start an unknown metric. 
name = sample.name documentation = None unit = None typ = 'unknown' samples = [] group = None group_timestamp = None group_timestamp_samples = set() seen_groups = set() allowed_names = [sample.name] if typ == 'stateset' and name not in sample.labels: raise ValueError("Stateset missing label: " + line) if (typ in ['histogram', 'gaugehistogram'] and name + '_bucket' == sample.name and (float(sample.labels.get('le', -1)) < 0 or sample.labels['le'] != floatToGoString(sample.labels['le']))): raise ValueError("Invalid le label: " + line) if (typ == 'summary' and name == sample.name and (not (0 <= float(sample.labels.get('quantile', -1)) <= 1) or sample.labels['quantile'] != floatToGoString(sample.labels['quantile']))): raise ValueError("Invalid quantile label: " + line) g = tuple(sorted(_group_for_sample(sample, name, typ).items())) if group is not None and g != group and g in seen_groups: raise ValueError("Invalid metric grouping: " + line) if group is not None and g == group: if (sample.timestamp is None) != (group_timestamp is None): raise ValueError("Mix of timestamp presence within a group: " + line) if group_timestamp is not None and group_timestamp > sample.timestamp and typ != 'info': raise ValueError("Timestamps went backwards within a group: " + line) else: group_timestamp_samples = set() series_id = (sample.name, tuple(sorted(sample.labels.items()))) if sample.timestamp != group_timestamp or series_id not in group_timestamp_samples: # Not a duplicate due to timestamp truncation. samples.append(sample) group_timestamp_samples.add(series_id) group = g group_timestamp = sample.timestamp seen_groups.add(g) if typ == 'stateset' and sample.value not in [0, 1]: raise ValueError("Stateset samples can only have values zero and one: " + line) if typ == 'info' and sample.value != 1: raise ValueError("Info samples can only have value one: " + line) if typ == 'summary' and name == sample.name and sample.value < 0: raise ValueError("Quantile values cannot be negative: " + line) if sample.name[len(name):] in ['_total', '_sum', '_count', '_bucket', '_gcount', '_gsum'] and math.isnan( sample.value): raise ValueError("Counter-like samples cannot be NaN: " + line) if sample.name[len(name):] in ['_total', '_sum', '_count', '_bucket', '_gcount', '_gsum'] and sample.value < 0: raise ValueError("Counter-like samples cannot be negative: " + line) if sample.exemplar and not ( typ in ['histogram', 'gaugehistogram'] and sample.name.endswith('_bucket')): raise ValueError("Invalid line only histogram/gaugehistogram buckets can have exemplars: " + line) if name is not None: yield build_metric(name, documentation, typ, unit, samples) if not eof: raise ValueError("Missing # EOF at end") python-prometheus-client-0.7.1/prometheus_client/parser.py000077500000000000000000000155631350270547000241240ustar00rootroot00000000000000#!/usr/bin/python from __future__ import unicode_literals import re from .metrics_core import Metric from .samples import Sample try: import StringIO except ImportError: # Python 3 import io as StringIO def text_string_to_metric_families(text): """Parse Prometheus text format from a unicode string. See text_fd_to_metric_families. 
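A sketch:

        text = '# TYPE requests_total counter\nrequests_total{code="200"} 42.0\n'
        for family in text_string_to_metric_families(text):
            for sample in family.samples:
                print(sample.name, sample.labels, sample.value)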
""" for metric_family in text_fd_to_metric_families(StringIO.StringIO(text)): yield metric_family ESCAPE_SEQUENCES = { '\\\\': '\\', '\\n': '\n', '\\"': '"', } def replace_escape_sequence(match): return ESCAPE_SEQUENCES[match.group(0)] HELP_ESCAPING_RE = re.compile(r'\\[\\n]') ESCAPING_RE = re.compile(r'\\[\\n"]') def _replace_help_escaping(s): return HELP_ESCAPING_RE.sub(replace_escape_sequence, s) def _replace_escaping(s): return ESCAPING_RE.sub(replace_escape_sequence, s) def _is_character_escaped(s, charpos): num_bslashes = 0 while (charpos > num_bslashes and s[charpos - 1 - num_bslashes] == '\\'): num_bslashes += 1 return num_bslashes % 2 == 1 def _parse_labels(labels_string): labels = {} # Return if we don't have valid labels if "=" not in labels_string: return labels escaping = False if "\\" in labels_string: escaping = True # Copy original labels sub_labels = labels_string try: # Process one label at a time while sub_labels: # The label name is before the equal value_start = sub_labels.index("=") label_name = sub_labels[:value_start] sub_labels = sub_labels[value_start + 1:].lstrip() # Find the first quote after the equal quote_start = sub_labels.index('"') + 1 value_substr = sub_labels[quote_start:] # Find the last unescaped quote i = 0 while i < len(value_substr): i = value_substr.index('"', i) if not _is_character_escaped(value_substr, i): break i += 1 # The label value is inbetween the first and last quote quote_end = i + 1 label_value = sub_labels[quote_start:quote_end] # Replace escaping if needed if escaping: label_value = _replace_escaping(label_value) labels[label_name.strip()] = label_value # Remove the processed label from the sub-slice for next iteration sub_labels = sub_labels[quote_end + 1:] next_comma = sub_labels.find(",") + 1 sub_labels = sub_labels[next_comma:].lstrip() return labels except ValueError: raise ValueError("Invalid labels: %s" % labels_string) # If we have multiple values only consider the first def _parse_value(s): s = s.lstrip() separator = " " if separator not in s: separator = "\t" i = s.find(separator) if i == -1: return s return s[:i] def _parse_sample(text): # Detect the labels in the text try: label_start, label_end = text.index("{"), text.rindex("}") # The name is before the labels name = text[:label_start].strip() # We ignore the starting curly brace label = text[label_start + 1:label_end] # The value is after the label end (ignoring curly brace and space) value = float(_parse_value(text[label_end + 2:])) return Sample(name, _parse_labels(label), value) # We don't have labels except ValueError: # Detect what separator is used separator = " " if separator not in text: separator = "\t" name_end = text.index(separator) name = text[:name_end] # The value is after the name value = float(_parse_value(text[name_end:])) return Sample(name, {}, value) def text_fd_to_metric_families(fd): """Parse Prometheus text format from a file descriptor. This is a laxer parser than the main Go parser, so successful parsing does not imply that the parsed text meets the specification. Yields Metric's. """ name = '' documentation = '' typ = 'untyped' samples = [] allowed_names = [] def build_metric(name, documentation, typ, samples): # Munge counters into OpenMetrics representation # used internally. 
if typ == 'counter': if name.endswith('_total'): name = name[:-6] else: new_samples = [] for s in samples: new_samples.append(Sample(s[0] + '_total', *s[1:])) samples = new_samples metric = Metric(name, documentation, typ) metric.samples = samples return metric for line in fd: line = line.strip() if line.startswith('#'): parts = line.split(None, 3) if len(parts) < 2: continue if parts[1] == 'HELP': if parts[2] != name: if name != '': yield build_metric(name, documentation, typ, samples) # New metric name = parts[2] typ = 'untyped' samples = [] allowed_names = [parts[2]] if len(parts) == 4: documentation = _replace_help_escaping(parts[3]) else: documentation = '' elif parts[1] == 'TYPE': if parts[2] != name: if name != '': yield build_metric(name, documentation, typ, samples) # New metric name = parts[2] documentation = '' samples = [] typ = parts[3] allowed_names = { 'counter': [''], 'gauge': [''], 'summary': ['_count', '_sum', ''], 'histogram': ['_count', '_sum', '_bucket'], }.get(typ, ['']) allowed_names = [name + n for n in allowed_names] else: # Ignore other comment tokens pass elif line == '': # Ignore blank lines pass else: sample = _parse_sample(line) if sample.name not in allowed_names: if name != '': yield build_metric(name, documentation, typ, samples) # New metric, yield immediately as untyped singleton name = '' documentation = '' typ = 'untyped' samples = [] allowed_names = [] yield build_metric(sample[0], documentation, typ, [sample]) else: samples.append(sample) if name != '': yield build_metric(name, documentation, typ, samples) python-prometheus-client-0.7.1/prometheus_client/platform_collector.py000066400000000000000000000033351350270547000265110ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 from __future__ import unicode_literals import platform as pf from .metrics_core import GaugeMetricFamily from .registry import REGISTRY class PlatformCollector(object): """Collector for python platform information""" def __init__(self, registry=REGISTRY, platform=None): self._platform = pf if platform is None else platform info = self._info() system = self._platform.system() if system == "Java": info.update(self._java()) self._metrics = [ self._add_metric("python_info", "Python platform information", info) ] if registry: registry.register(self) def collect(self): return self._metrics @staticmethod def _add_metric(name, documentation, data): labels = data.keys() values = [data[k] for k in labels] g = GaugeMetricFamily(name, documentation, labels=labels) g.add_metric(values, 1) return g def _info(self): major, minor, patchlevel = self._platform.python_version_tuple() return { "version": self._platform.python_version(), "implementation": self._platform.python_implementation(), "major": major, "minor": minor, "patchlevel": patchlevel } def _java(self): java_version, _, vminfo, osinfo = self._platform.java_ver() vm_name, vm_release, vm_vendor = vminfo return { "jvm_version": java_version, "jvm_release": vm_release, "jvm_vendor": vm_vendor, "jvm_name": vm_name } PLATFORM_COLLECTOR = PlatformCollector() """PlatformCollector in default Registry REGISTRY""" python-prometheus-client-0.7.1/prometheus_client/process_collector.py000066400000000000000000000070471350270547000263470ustar00rootroot00000000000000#!/usr/bin/python from __future__ import unicode_literals import os from .metrics_core import CounterMetricFamily, GaugeMetricFamily from .registry import REGISTRY try: import resource _PAGESIZE = resource.getpagesize() except ImportError: # Not Unix _PAGESIZE = 4096 
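
# Resident memory in /proc/<pid>/stat is reported in pages, so the collector
# below multiplies by the page size to export bytes. The 4096 fallback above
# is only a common default; on Unix the real value comes from the resource
# module, e.g.:
#
#     >>> import resource
#     >>> resource.getpagesize() > 0
#     True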
class ProcessCollector(object): """Collector for Standard Exports such as cpu and memory.""" def __init__(self, namespace='', pid=lambda: 'self', proc='/proc', registry=REGISTRY): self._namespace = namespace self._pid = pid self._proc = proc if namespace: self._prefix = namespace + '_process_' else: self._prefix = 'process_' self._ticks = 100.0 try: self._ticks = os.sysconf('SC_CLK_TCK') except (ValueError, TypeError, AttributeError): pass # This is used to test if we can access /proc. self._btime = 0 try: self._btime = self._boot_time() except IOError: pass if registry: registry.register(self) def _boot_time(self): with open(os.path.join(self._proc, 'stat'), 'rb') as stat: for line in stat: if line.startswith(b'btime '): return float(line.split()[1]) def collect(self): if not self._btime: return [] pid = os.path.join(self._proc, str(self._pid()).strip()) result = [] try: with open(os.path.join(pid, 'stat'), 'rb') as stat: parts = (stat.read().split(b')')[-1].split()) vmem = GaugeMetricFamily(self._prefix + 'virtual_memory_bytes', 'Virtual memory size in bytes.', value=float(parts[20])) rss = GaugeMetricFamily(self._prefix + 'resident_memory_bytes', 'Resident memory size in bytes.', value=float(parts[21]) * _PAGESIZE) start_time_secs = float(parts[19]) / self._ticks start_time = GaugeMetricFamily(self._prefix + 'start_time_seconds', 'Start time of the process since unix epoch in seconds.', value=start_time_secs + self._btime) utime = float(parts[11]) / self._ticks stime = float(parts[12]) / self._ticks cpu = CounterMetricFamily(self._prefix + 'cpu_seconds_total', 'Total user and system CPU time spent in seconds.', value=utime + stime) result.extend([vmem, rss, start_time, cpu]) except IOError: pass try: with open(os.path.join(pid, 'limits'), 'rb') as limits: for line in limits: if line.startswith(b'Max open file'): max_fds = GaugeMetricFamily(self._prefix + 'max_fds', 'Maximum number of open file descriptors.', value=float(line.split()[3])) break open_fds = GaugeMetricFamily(self._prefix + 'open_fds', 'Number of open file descriptors.', len(os.listdir(os.path.join(pid, 'fd')))) result.extend([open_fds, max_fds]) except (IOError, OSError): pass return result PROCESS_COLLECTOR = ProcessCollector() """Default ProcessCollector in default Registry REGISTRY.""" python-prometheus-client-0.7.1/prometheus_client/registry.py000066400000000000000000000102021350270547000244560ustar00rootroot00000000000000import copy from threading import Lock from .metrics_core import Metric class CollectorRegistry(object): """Metric collector registry. Collectors must have a no-argument method 'collect' that returns a list of Metric objects. The returned metrics should be consistent with the Prometheus exposition formats. 
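
    A minimal custom collector sketch (the names here are illustrative):

        class StaticCollector(object):
            def collect(self):
                return [Metric('x', 'A static metric', 'gauge')]

        registry = CollectorRegistry()
        registry.register(StaticCollector())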
""" def __init__(self, auto_describe=False): self._collector_to_names = {} self._names_to_collectors = {} self._auto_describe = auto_describe self._lock = Lock() def register(self, collector): """Add a collector to the registry.""" with self._lock: names = self._get_names(collector) duplicates = set(self._names_to_collectors).intersection(names) if duplicates: raise ValueError( 'Duplicated timeseries in CollectorRegistry: {0}'.format( duplicates)) for name in names: self._names_to_collectors[name] = collector self._collector_to_names[collector] = names def unregister(self, collector): """Remove a collector from the registry.""" with self._lock: for name in self._collector_to_names[collector]: del self._names_to_collectors[name] del self._collector_to_names[collector] def _get_names(self, collector): """Get names of timeseries the collector produces.""" desc_func = None # If there's a describe function, use it. try: desc_func = collector.describe except AttributeError: pass # Otherwise, if auto describe is enabled use the collect function. if not desc_func and self._auto_describe: desc_func = collector.collect if not desc_func: return [] result = [] type_suffixes = { 'counter': ['_total', '_created'], 'summary': ['', '_sum', '_count', '_created'], 'histogram': ['_bucket', '_sum', '_count', '_created'], 'gaugehistogram': ['_bucket', '_gsum', '_gcount'], 'info': ['_info'], } for metric in desc_func(): for suffix in type_suffixes.get(metric.type, ['']): result.append(metric.name + suffix) return result def collect(self): """Yields metrics from the collectors in the registry.""" collectors = None with self._lock: collectors = copy.copy(self._collector_to_names) for collector in collectors: for metric in collector.collect(): yield metric def restricted_registry(self, names): """Returns object that only collects some metrics. Returns an object which upon collect() will return only samples with the given names. Intended usage is: generate_latest(REGISTRY.restricted_registry(['a_timeseries'])) Experimental.""" names = set(names) collectors = set() with self._lock: for name in names: if name in self._names_to_collectors: collectors.add(self._names_to_collectors[name]) metrics = [] for collector in collectors: for metric in collector.collect(): samples = [s for s in metric.samples if s[0] in names] if samples: m = Metric(metric.name, metric.documentation, metric.type) m.samples = samples metrics.append(m) class RestrictedRegistry(object): def collect(self): return metrics return RestrictedRegistry() def get_sample_value(self, name, labels=None): """Returns the sample value, or None if not found. This is inefficient, and intended only for use in unittests. 
""" if labels is None: labels = {} for metric in self.collect(): for s in metric.samples: if s.name == name and s.labels == labels: return s.value return None REGISTRY = CollectorRegistry(auto_describe=True) python-prometheus-client-0.7.1/prometheus_client/samples.py000066400000000000000000000025161350270547000242630ustar00rootroot00000000000000from collections import namedtuple class Timestamp(object): """A nanosecond-resolution timestamp.""" def __init__(self, sec, nsec): if nsec < 0 or nsec >= 1e9: raise ValueError("Invalid value for nanoseconds in Timestamp: {0}".format(nsec)) if sec < 0: nsec = -nsec self.sec = int(sec) self.nsec = int(nsec) def __str__(self): return "{0}.{1:09d}".format(self.sec, self.nsec) def __repr__(self): return "Timestamp({0}, {1})".format(self.sec, self.nsec) def __float__(self): return float(self.sec) + float(self.nsec) / 1e9 def __eq__(self, other): return type(self) == type(other) and self.sec == other.sec and self.nsec == other.nsec def __ne__(self, other): return not self == other def __gt__(self, other): return self.sec > other.sec or self.nsec > other.nsec # Timestamp and exemplar are optional. # Value can be an int or a float. # Timestamp can be a float containing a unixtime in seconds, # a Timestamp object, or None. # Exemplar can be an Exemplar object, or None. Sample = namedtuple('Sample', ['name', 'labels', 'value', 'timestamp', 'exemplar']) Sample.__new__.__defaults__ = (None, None) Exemplar = namedtuple('Exemplar', ['labels', 'value', 'timestamp']) Exemplar.__new__.__defaults__ = (None,) python-prometheus-client-0.7.1/prometheus_client/twisted/000077500000000000000000000000001350270547000237245ustar00rootroot00000000000000python-prometheus-client-0.7.1/prometheus_client/twisted/__init__.py000066400000000000000000000001101350270547000260250ustar00rootroot00000000000000from ._exposition import MetricsResource __all__ = ['MetricsResource'] python-prometheus-client-0.7.1/prometheus_client/twisted/_exposition.py000066400000000000000000000010761350270547000266420ustar00rootroot00000000000000from __future__ import absolute_import, unicode_literals from twisted.web.resource import Resource from .. import exposition, REGISTRY class MetricsResource(Resource): """ Twisted ``Resource`` that serves prometheus metrics. """ isLeaf = True def __init__(self, registry=REGISTRY): self.registry = registry def render_GET(self, request): encoder, content_type = exposition.choose_encoder(request.getHeader('Accept')) request.setHeader(b'Content-Type', content_type.encode('ascii')) return encoder(self.registry) python-prometheus-client-0.7.1/prometheus_client/utils.py000066400000000000000000000011321350270547000237500ustar00rootroot00000000000000import math INF = float("inf") MINUS_INF = float("-inf") def floatToGoString(d): d = float(d) if d == INF: return '+Inf' elif d == MINUS_INF: return '-Inf' elif math.isnan(d): return 'NaN' else: s = repr(d) dot = s.find('.') # Go switches to exponents sooner than Python. # We only need to care about positive values for le/quantile. 
if d > 0 and dot > 6: mantissa = '{0}.{1}{2}'.format(s[0], s[1:dot], s[dot + 1:]).rstrip('0.') return '{0}e+0{1}'.format(mantissa, dot - 1) return s python-prometheus-client-0.7.1/prometheus_client/values.py000066400000000000000000000066331350270547000241220ustar00rootroot00000000000000from __future__ import unicode_literals import os from threading import Lock from .mmap_dict import mmap_key, MmapedDict class MutexValue(object): """A float protected by a mutex.""" _multiprocess = False def __init__(self, typ, metric_name, name, labelnames, labelvalues, **kwargs): self._value = 0.0 self._lock = Lock() def inc(self, amount): with self._lock: self._value += amount def set(self, value): with self._lock: self._value = value def get(self): with self._lock: return self._value def MultiProcessValue(_pidFunc=os.getpid): files = {} values = [] pid = {'value': _pidFunc()} # Use a single global lock when in multi-processing mode # as we presume this means there is no threading going on. # This avoids the need to also have mutexes in __MmapDict. lock = Lock() class MmapedValue(object): """A float protected by a mutex backed by a per-process mmaped file.""" _multiprocess = True def __init__(self, typ, metric_name, name, labelnames, labelvalues, multiprocess_mode='', **kwargs): self._params = typ, metric_name, name, labelnames, labelvalues, multiprocess_mode with lock: self.__check_for_pid_change() self.__reset() values.append(self) def __reset(self): typ, metric_name, name, labelnames, labelvalues, multiprocess_mode = self._params if typ == 'gauge': file_prefix = typ + '_' + multiprocess_mode else: file_prefix = typ if file_prefix not in files: filename = os.path.join( os.environ['prometheus_multiproc_dir'], '{0}_{1}.db'.format(file_prefix, pid['value'])) files[file_prefix] = MmapedDict(filename) self._file = files[file_prefix] self._key = mmap_key(metric_name, name, labelnames, labelvalues) self._value = self._file.read_value(self._key) def __check_for_pid_change(self): actual_pid = _pidFunc() if pid['value'] != actual_pid: pid['value'] = actual_pid # There has been a fork(), reset all the values. for f in files.values(): f.close() files.clear() for value in values: value.__reset() def inc(self, amount): with lock: self.__check_for_pid_change() self._value += amount self._file.write_value(self._key, self._value) def set(self, value): with lock: self.__check_for_pid_change() self._value = value self._file.write_value(self._key, self._value) def get(self): with lock: self.__check_for_pid_change() return self._value return MmapedValue def get_value_class(): # Should we enable multi-process mode? # This needs to be chosen before the first metric is constructed, # and as that may be in some arbitrary library the user/admin has # no control over we use an environment variable. 
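    # The directory therefore has to be exported before this module is
    # first imported, e.g. (illustrative path):
    #
    #     export prometheus_multiproc_dir=/tmp/prometheus_metrics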
if 'prometheus_multiproc_dir' in os.environ: return MultiProcessValue() else: return MutexValue ValueClass = get_value_class() python-prometheus-client-0.7.1/setup.py000066400000000000000000000032131350270547000202210ustar00rootroot00000000000000from setuptools import setup setup( name="prometheus_client", version="0.7.1", author="Brian Brazil", author_email="brian.brazil@robustperception.io", description="Python client for the Prometheus monitoring system.", long_description=( "See https://github.com/prometheus/client_python/blob/master/README.md" " for documentation."), license="Apache Software License 2.0", keywords="prometheus monitoring instrumentation client", url="https://github.com/prometheus/client_python", packages=[ 'prometheus_client', 'prometheus_client.bridge', 'prometheus_client.openmetrics', 'prometheus_client.twisted', ], extras_require={ 'twisted': ['twisted'], }, test_suite="tests", classifiers=[ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", "Topic :: System :: Monitoring", "License :: OSI Approved :: Apache Software License", ], ) python-prometheus-client-0.7.1/tests/000077500000000000000000000000001350270547000176525ustar00rootroot00000000000000python-prometheus-client-0.7.1/tests/__init__.py000066400000000000000000000000001350270547000217510ustar00rootroot00000000000000python-prometheus-client-0.7.1/tests/openmetrics/000077500000000000000000000000001350270547000222025ustar00rootroot00000000000000python-prometheus-client-0.7.1/tests/openmetrics/__init__.py000066400000000000000000000000001350270547000243010ustar00rootroot00000000000000python-prometheus-client-0.7.1/tests/openmetrics/test_exposition.py000066400000000000000000000176521350270547000260270ustar00rootroot00000000000000from __future__ import unicode_literals import sys import time from prometheus_client import ( CollectorRegistry, Counter, Enum, Gauge, Histogram, Info, Metric, Summary, ) from prometheus_client.core import ( Exemplar, GaugeHistogramMetricFamily, Timestamp, ) from prometheus_client.openmetrics.exposition import generate_latest if sys.version_info < (2, 7): # We need the skip decorators from unittest2 on Python 2.6. import unittest2 as unittest else: import unittest class TestGenerateText(unittest.TestCase): def setUp(self): self.registry = CollectorRegistry() # Mock time so _created values are fixed. 
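        # Counter, Summary and Histogram stamp a *_created sample with
        # time.time() at construction, so pinning the clock keeps expected
        # output such as 'cc_created 123.456' deterministic.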
self.old_time = time.time time.time = lambda: 123.456 def tearDown(self): time.time = self.old_time def custom_collector(self, metric_family): class CustomCollector(object): def collect(self): return [metric_family] self.registry.register(CustomCollector()) def test_counter(self): c = Counter('cc', 'A counter', registry=self.registry) c.inc() self.assertEqual(b'# HELP cc A counter\n# TYPE cc counter\ncc_total 1.0\ncc_created 123.456\n# EOF\n', generate_latest(self.registry)) def test_counter_total(self): c = Counter('cc_total', 'A counter', registry=self.registry) c.inc() self.assertEqual(b'# HELP cc A counter\n# TYPE cc counter\ncc_total 1.0\ncc_created 123.456\n# EOF\n', generate_latest(self.registry)) def test_gauge(self): g = Gauge('gg', 'A gauge', registry=self.registry) g.set(17) self.assertEqual(b'# HELP gg A gauge\n# TYPE gg gauge\ngg 17.0\n# EOF\n', generate_latest(self.registry)) def test_summary(self): s = Summary('ss', 'A summary', ['a', 'b'], registry=self.registry) s.labels('c', 'd').observe(17) self.assertEqual(b"""# HELP ss A summary # TYPE ss summary ss_count{a="c",b="d"} 1.0 ss_sum{a="c",b="d"} 17.0 ss_created{a="c",b="d"} 123.456 # EOF """, generate_latest(self.registry)) @unittest.skipIf(sys.version_info < (2, 7), "Test requires Python 2.7+.") def test_histogram(self): s = Histogram('hh', 'A histogram', registry=self.registry) s.observe(0.05) self.assertEqual(b"""# HELP hh A histogram # TYPE hh histogram hh_bucket{le="0.005"} 0.0 hh_bucket{le="0.01"} 0.0 hh_bucket{le="0.025"} 0.0 hh_bucket{le="0.05"} 1.0 hh_bucket{le="0.075"} 1.0 hh_bucket{le="0.1"} 1.0 hh_bucket{le="0.25"} 1.0 hh_bucket{le="0.5"} 1.0 hh_bucket{le="0.75"} 1.0 hh_bucket{le="1.0"} 1.0 hh_bucket{le="2.5"} 1.0 hh_bucket{le="5.0"} 1.0 hh_bucket{le="7.5"} 1.0 hh_bucket{le="10.0"} 1.0 hh_bucket{le="+Inf"} 1.0 hh_count 1.0 hh_sum 0.05 hh_created 123.456 # EOF """, generate_latest(self.registry)) def test_histogram_exemplar(self): class MyCollector(object): def collect(self): metric = Metric("hh", "help", 'histogram') # This is not sane, but it covers all the cases. metric.add_sample("hh_bucket", {"le": "1"}, 0, None, Exemplar({'a': 'b'}, 0.5)) metric.add_sample("hh_bucket", {"le": "2"}, 0, None, Exemplar({'le': '7'}, 0.5, 12)) metric.add_sample("hh_bucket", {"le": "3"}, 0, 123, Exemplar({'a': 'b'}, 2.5, 12)) metric.add_sample("hh_bucket", {"le": "4"}, 0, None, Exemplar({'a': '\n"\\'}, 3.5)) metric.add_sample("hh_bucket", {"le": "+Inf"}, 0, None, None) yield metric self.registry.register(MyCollector()) self.assertEqual(b"""# HELP hh help # TYPE hh histogram hh_bucket{le="1"} 0.0 # {a="b"} 0.5 hh_bucket{le="2"} 0.0 # {le="7"} 0.5 12 hh_bucket{le="3"} 0.0 123 # {a="b"} 2.5 12 hh_bucket{le="4"} 0.0 # {a="\\n\\"\\\\"} 3.5 hh_bucket{le="+Inf"} 0.0 # EOF """, generate_latest(self.registry)) def test_nonhistogram_exemplar(self): class MyCollector(object): def collect(self): metric = Metric("hh", "help", 'untyped') # This is not sane, but it covers all the cases. metric.add_sample("hh_bucket", {}, 0, None, Exemplar({'a': 'b'}, 0.5)) yield metric self.registry.register(MyCollector()) with self.assertRaises(ValueError): generate_latest(self.registry) def test_nonhistogram_bucket_exemplar(self): class MyCollector(object): def collect(self): metric = Metric("hh", "help", 'histogram') # This is not sane, but it covers all the cases. 
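                # Exemplars are only permitted on histogram/gaugehistogram
                # _bucket samples, so attaching one to hh_count must make
                # generate_latest raise ValueError below.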
metric.add_sample("hh_count", {}, 0, None, Exemplar({'a': 'b'}, 0.5)) yield metric self.registry.register(MyCollector()) with self.assertRaises(ValueError): generate_latest(self.registry) def test_gaugehistogram(self): self.custom_collector( GaugeHistogramMetricFamily('gh', 'help', buckets=[('1.0', 4), ('+Inf', (5))], gsum_value=7)) self.assertEqual(b"""# HELP gh help # TYPE gh gaugehistogram gh_bucket{le="1.0"} 4.0 gh_bucket{le="+Inf"} 5.0 gh_gcount 5.0 gh_gsum 7.0 # EOF """, generate_latest(self.registry)) def test_info(self): i = Info('ii', 'A info', ['a', 'b'], registry=self.registry) i.labels('c', 'd').info({'foo': 'bar'}) self.assertEqual(b"""# HELP ii A info # TYPE ii info ii_info{a="c",b="d",foo="bar"} 1.0 # EOF """, generate_latest(self.registry)) def test_enum(self): i = Enum('ee', 'An enum', ['a', 'b'], registry=self.registry, states=['foo', 'bar']) i.labels('c', 'd').state('bar') self.assertEqual(b"""# HELP ee An enum # TYPE ee stateset ee{a="c",b="d",ee="foo"} 0.0 ee{a="c",b="d",ee="bar"} 1.0 # EOF """, generate_latest(self.registry)) def test_unicode(self): c = Counter('cc', '\u4500', ['l'], registry=self.registry) c.labels('\u4500').inc() self.assertEqual(b"""# HELP cc \xe4\x94\x80 # TYPE cc counter cc_total{l="\xe4\x94\x80"} 1.0 cc_created{l="\xe4\x94\x80"} 123.456 # EOF """, generate_latest(self.registry)) def test_escaping(self): c = Counter('cc', 'A\ncount\\er\"', ['a'], registry=self.registry) c.labels('\\x\n"').inc(1) self.assertEqual(b"""# HELP cc A\\ncount\\\\er\\" # TYPE cc counter cc_total{a="\\\\x\\n\\""} 1.0 cc_created{a="\\\\x\\n\\""} 123.456 # EOF """, generate_latest(self.registry)) def test_nonnumber(self): class MyNumber(object): def __repr__(self): return "MyNumber(123)" def __float__(self): return 123.0 class MyCollector(object): def collect(self): metric = Metric("nonnumber", "Non number", 'untyped') metric.add_sample("nonnumber", {}, MyNumber()) yield metric self.registry.register(MyCollector()) self.assertEqual(b'# HELP nonnumber Non number\n# TYPE nonnumber unknown\nnonnumber 123.0\n# EOF\n', generate_latest(self.registry)) def test_timestamp(self): class MyCollector(object): def collect(self): metric = Metric("ts", "help", 'unknown') metric.add_sample("ts", {"foo": "a"}, 0, 123.456) metric.add_sample("ts", {"foo": "b"}, 0, -123.456) metric.add_sample("ts", {"foo": "c"}, 0, 123) metric.add_sample("ts", {"foo": "d"}, 0, Timestamp(123, 456000000)) metric.add_sample("ts", {"foo": "e"}, 0, Timestamp(123, 456000)) metric.add_sample("ts", {"foo": "f"}, 0, Timestamp(123, 456)) yield metric self.registry.register(MyCollector()) self.assertEqual(b"""# HELP ts help # TYPE ts unknown ts{foo="a"} 0.0 123.456 ts{foo="b"} 0.0 -123.456 ts{foo="c"} 0.0 123 ts{foo="d"} 0.0 123.456000000 ts{foo="e"} 0.0 123.000456000 ts{foo="f"} 0.0 123.000000456 # EOF """, generate_latest(self.registry)) if __name__ == '__main__': unittest.main() python-prometheus-client-0.7.1/tests/openmetrics/test_parser.py000066400000000000000000000714011350270547000251120ustar00rootroot00000000000000from __future__ import unicode_literals import math import sys from prometheus_client.core import ( CollectorRegistry, CounterMetricFamily, Exemplar, GaugeHistogramMetricFamily, GaugeMetricFamily, HistogramMetricFamily, InfoMetricFamily, Metric, Sample, StateSetMetricFamily, SummaryMetricFamily, Timestamp, ) from prometheus_client.openmetrics.exposition import generate_latest from prometheus_client.openmetrics.parser import text_string_to_metric_families if sys.version_info < (2, 7): # We need the skip 
decorators from unittest2 on Python 2.6. import unittest2 as unittest else: import unittest class TestParse(unittest.TestCase): def test_simple_counter(self): families = text_string_to_metric_families("""# TYPE a counter # HELP a help a_total 1 # EOF """) self.assertEqual([CounterMetricFamily("a", "help", value=1)], list(families)) def test_uint64_counter(self): families = text_string_to_metric_families("""# TYPE a counter # HELP a help a_total 9223372036854775808 # EOF """) self.assertEqual([CounterMetricFamily("a", "help", value=9223372036854775808)], list(families)) def test_simple_gauge(self): families = text_string_to_metric_families("""# TYPE a gauge # HELP a help a 1 # EOF """) self.assertEqual([GaugeMetricFamily("a", "help", value=1)], list(families)) def test_float_gauge(self): families = text_string_to_metric_families("""# TYPE a gauge # HELP a help a 1.2 # EOF """) self.assertEqual([GaugeMetricFamily("a", "help", value=1.2)], list(families)) def test_nan_gauge(self): families = text_string_to_metric_families("""# TYPE a gauge # HELP a help a NaN # EOF """) self.assertTrue(math.isnan(list(families)[0].samples[0].value)) def test_unit_gauge(self): families = text_string_to_metric_families("""# TYPE a_seconds gauge # UNIT a_seconds seconds # HELP a_seconds help a_seconds 1 # EOF """) self.assertEqual([GaugeMetricFamily("a_seconds", "help", value=1, unit='seconds')], list(families)) def test_simple_summary(self): families = text_string_to_metric_families("""# TYPE a summary # HELP a help a_count 1 a_sum 2 # EOF """) summary = SummaryMetricFamily("a", "help", count_value=1, sum_value=2) self.assertEqual([summary], list(families)) def test_summary_quantiles(self): families = text_string_to_metric_families("""# TYPE a summary # HELP a help a_count 1 a_sum 2 a{quantile="0.5"} 0.7 # EOF """) # The Python client doesn't support quantiles, but we # still need to be able to parse them. 
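        # A parsed quantile simply becomes an extra sample carrying a
        # 'quantile' label, as add_sample shows below.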
metric_family = SummaryMetricFamily("a", "help", count_value=1, sum_value=2) metric_family.add_sample("a", {"quantile": "0.5"}, 0.7) self.assertEqual([metric_family], list(families)) def test_simple_histogram(self): families = text_string_to_metric_families("""# TYPE a histogram # HELP a help a_bucket{le="1.0"} 0 a_bucket{le="+Inf"} 3 a_count 3 a_sum 2 # EOF """) self.assertEqual([HistogramMetricFamily("a", "help", sum_value=2, buckets=[("1.0", 0.0), ("+Inf", 3.0)])], list(families)) def test_histogram_exemplars(self): families = text_string_to_metric_families("""# TYPE a histogram # HELP a help a_bucket{le="1.0"} 0 # {a="b"} 0.5 a_bucket{le="2.0"} 2 # {a="c"} 0.5 a_bucket{le="+Inf"} 3 # {a="1234567890123456789012345678901234567890123456789012345678"} 4 123 # EOF """) hfm = HistogramMetricFamily("a", "help") hfm.add_sample("a_bucket", {"le": "1.0"}, 0.0, None, Exemplar({"a": "b"}, 0.5)) hfm.add_sample("a_bucket", {"le": "2.0"}, 2.0, None, Exemplar({"a": "c"}, 0.5)), hfm.add_sample("a_bucket", {"le": "+Inf"}, 3.0, None, Exemplar({"a": "1234567890123456789012345678901234567890123456789012345678"}, 4, Timestamp(123, 0))) self.assertEqual([hfm], list(families)) def test_simple_gaugehistogram(self): families = text_string_to_metric_families("""# TYPE a gaugehistogram # HELP a help a_bucket{le="1.0"} 0 a_bucket{le="+Inf"} 3 a_gcount 3 a_gsum 2 # EOF """) self.assertEqual([GaugeHistogramMetricFamily("a", "help", gsum_value=2, buckets=[("1.0", 0.0), ("+Inf", 3.0)])], list(families)) def test_gaugehistogram_exemplars(self): families = text_string_to_metric_families("""# TYPE a gaugehistogram # HELP a help a_bucket{le="1.0"} 0 123 # {a="b"} 0.5 a_bucket{le="2.0"} 2 123 # {a="c"} 0.5 a_bucket{le="+Inf"} 3 123 # {a="d"} 4 123 # EOF """) hfm = GaugeHistogramMetricFamily("a", "help") hfm.add_sample("a_bucket", {"le": "1.0"}, 0.0, Timestamp(123, 0), Exemplar({"a": "b"}, 0.5)) hfm.add_sample("a_bucket", {"le": "2.0"}, 2.0, Timestamp(123, 0), Exemplar({"a": "c"}, 0.5)), hfm.add_sample("a_bucket", {"le": "+Inf"}, 3.0, Timestamp(123, 0), Exemplar({"a": "d"}, 4, Timestamp(123, 0))) self.assertEqual([hfm], list(families)) def test_simple_info(self): families = text_string_to_metric_families("""# TYPE a info # HELP a help a_info{foo="bar"} 1 # EOF """) self.assertEqual([InfoMetricFamily("a", "help", {'foo': 'bar'})], list(families)) def test_info_timestamps(self): families = text_string_to_metric_families("""# TYPE a info # HELP a help a_info{a="1",foo="bar"} 1 1 a_info{a="2",foo="bar"} 1 0 # EOF """) imf = InfoMetricFamily("a", "help") imf.add_sample("a_info", {"a": "1", "foo": "bar"}, 1, Timestamp(1, 0)) imf.add_sample("a_info", {"a": "2", "foo": "bar"}, 1, Timestamp(0, 0)) self.assertEqual([imf], list(families)) def test_simple_stateset(self): families = text_string_to_metric_families("""# TYPE a stateset # HELP a help a{a="bar"} 0 a{a="foo"} 1.0 # EOF """) self.assertEqual([StateSetMetricFamily("a", "help", {'foo': True, 'bar': False})], list(families)) def test_duplicate_timestamps(self): families = text_string_to_metric_families("""# TYPE a gauge # HELP a help a{a="1",foo="bar"} 1 0.0000000000 a{a="1",foo="bar"} 2 0.0000000001 a{a="1",foo="bar"} 3 0.0000000010 a{a="2",foo="bar"} 4 0.0000000000 a{a="2",foo="bar"} 5 0.0000000001 # EOF """) imf = GaugeMetricFamily("a", "help") imf.add_sample("a", {"a": "1", "foo": "bar"}, 1, Timestamp(0, 0)) imf.add_sample("a", {"a": "1", "foo": "bar"}, 3, Timestamp(0, 1)) imf.add_sample("a", {"a": "2", "foo": "bar"}, 4, Timestamp(0, 0)) self.assertEqual([imf], 
list(families)) def test_no_metadata(self): families = text_string_to_metric_families("""a 1 # EOF """) metric_family = Metric("a", "", "untyped") metric_family.add_sample("a", {}, 1) self.assertEqual([metric_family], list(families)) def test_empty_metadata(self): families = text_string_to_metric_families("""# HELP a # UNIT a # EOF """) metric_family = Metric("a", "", "untyped") self.assertEqual([metric_family], list(families)) def test_untyped(self): # https://github.com/prometheus/client_python/issues/79 families = text_string_to_metric_families("""# HELP redis_connected_clients Redis connected clients # TYPE redis_connected_clients unknown redis_connected_clients{instance="rough-snowflake-web",port="6380"} 10.0 redis_connected_clients{instance="rough-snowflake-web",port="6381"} 12.0 # EOF """) m = Metric("redis_connected_clients", "Redis connected clients", "untyped") m.samples = [ Sample("redis_connected_clients", {"instance": "rough-snowflake-web", "port": "6380"}, 10), Sample("redis_connected_clients", {"instance": "rough-snowflake-web", "port": "6381"}, 12), ] self.assertEqual([m], list(families)) def test_type_help_switched(self): families = text_string_to_metric_families("""# HELP a help # TYPE a counter a_total 1 # EOF """) self.assertEqual([CounterMetricFamily("a", "help", value=1)], list(families)) def test_labels_with_curly_braces(self): families = text_string_to_metric_families("""# TYPE a counter # HELP a help a_total{foo="bar",bar="b{a}z"} 1 # EOF """) metric_family = CounterMetricFamily("a", "help", labels=["foo", "bar"]) metric_family.add_metric(["bar", "b{a}z"], 1) self.assertEqual([metric_family], list(families)) def test_empty_help(self): families = text_string_to_metric_families("""# TYPE a counter # HELP a a_total 1 # EOF """) self.assertEqual([CounterMetricFamily("a", "", value=1)], list(families)) def test_labels_and_infinite(self): families = text_string_to_metric_families("""# TYPE a gauge # HELP a help a{foo="bar"} +Inf a{foo="baz"} -Inf # EOF """) metric_family = GaugeMetricFamily("a", "help", labels=["foo"]) metric_family.add_metric(["bar"], float('inf')) metric_family.add_metric(["baz"], float('-inf')) self.assertEqual([metric_family], list(families)) def test_empty_brackets(self): families = text_string_to_metric_families("""# TYPE a counter # HELP a help a_total{} 1 # EOF """) self.assertEqual([CounterMetricFamily("a", "help", value=1)], list(families)) def test_nan(self): families = text_string_to_metric_families("""a NaN # EOF """) self.assertTrue(math.isnan(list(families)[0].samples[0][2])) def test_no_newline_after_eof(self): families = text_string_to_metric_families("""# TYPE a gauge # HELP a help a 1 # EOF""") self.assertEqual([GaugeMetricFamily("a", "help", value=1)], list(families)) def test_empty_label(self): families = text_string_to_metric_families("""# TYPE a counter # HELP a help a_total{foo="bar"} 1 a_total{foo=""} 2 # EOF """) metric_family = CounterMetricFamily("a", "help", labels=["foo"]) metric_family.add_metric(["bar"], 1) metric_family.add_metric([""], 2) self.assertEqual([metric_family], list(families)) def test_label_escaping(self): for escaped_val, unescaped_val in [('foo', 'foo'), ('\\foo', '\\foo'), ('\\\\foo', '\\foo'), ('foo\\\\', 'foo\\'), ('\\\\', '\\'), ('\\n', '\n'), ('\\\\n', '\\n'), ('\\\\\\n', '\\\n'), ('\\"', '"'), ('\\\\\\"', '\\"')]: families = list(text_string_to_metric_families("""# TYPE a counter # HELP a help a_total{foo="%s",bar="baz"} 1 # EOF """ % escaped_val)) metric_family = CounterMetricFamily( "a", "help", 
labels=["foo", "bar"]) metric_family.add_metric([unescaped_val, "baz"], 1) self.assertEqual([metric_family], list(families)) def test_help_escaping(self): for escaped_val, unescaped_val in [ ('foo', 'foo'), ('\\foo', '\\foo'), ('\\\\foo', '\\foo'), ('foo\\', 'foo\\'), ('foo\\\\', 'foo\\'), ('\\n', '\n'), ('\\\\n', '\\n'), ('\\\\\\n', '\\\n'), ('\\"', '"'), ('\\\\"', '\\"'), ('\\\\\\"', '\\"')]: families = list(text_string_to_metric_families("""# TYPE a counter # HELP a %s a_total{foo="bar"} 1 # EOF """ % escaped_val)) metric_family = CounterMetricFamily("a", unescaped_val, labels=["foo"]) metric_family.add_metric(["bar"], 1) self.assertEqual([metric_family], list(families)) def test_escaping(self): families = text_string_to_metric_families("""# TYPE a counter # HELP a he\\n\\\\l\\tp a_total{foo="b\\"a\\nr"} 1 a_total{foo="b\\\\a\\z"} 2 # EOF """) metric_family = CounterMetricFamily("a", "he\n\\l\\tp", labels=["foo"]) metric_family.add_metric(["b\"a\nr"], 1) metric_family.add_metric(["b\\a\\z"], 2) self.assertEqual([metric_family], list(families)) def test_null_byte(self): families = text_string_to_metric_families("""# TYPE a counter # HELP a he\0lp # EOF """) metric_family = CounterMetricFamily("a", "he\0lp") self.assertEqual([metric_family], list(families)) def test_timestamps(self): families = text_string_to_metric_families("""# TYPE a counter # HELP a help a_total{foo="1"} 1 000 a_total{foo="2"} 1 0.0 a_total{foo="3"} 1 1.1 a_total{foo="4"} 1 12345678901234567890.1234567890 a_total{foo="5"} 1 1.5e3 # TYPE b counter # HELP b help b_total 2 1234567890 # EOF """) a = CounterMetricFamily("a", "help", labels=["foo"]) a.add_metric(["1"], 1, timestamp=Timestamp(0, 0)) a.add_metric(["2"], 1, timestamp=Timestamp(0, 0)) a.add_metric(["3"], 1, timestamp=Timestamp(1, 100000000)) a.add_metric(["4"], 1, timestamp=Timestamp(12345678901234567890, 123456789)) a.add_metric(["5"], 1, timestamp=1500.0) b = CounterMetricFamily("b", "help") b.add_metric([], 2, timestamp=Timestamp(1234567890, 0)) self.assertEqual([a, b], list(families)) def test_hash_in_label_value(self): families = text_string_to_metric_families("""# TYPE a counter # HELP a help a_total{foo="foo # bar"} 1 a_total{foo="} foo # bar # "} 1 # EOF """) a = CounterMetricFamily("a", "help", labels=["foo"]) a.add_metric(["foo # bar"], 1) a.add_metric(["} foo # bar # "], 1) self.assertEqual([a], list(families)) def test_exemplars_with_hash_in_label_values(self): families = text_string_to_metric_families("""# TYPE a histogram # HELP a help a_bucket{le="1.0",foo="bar # "} 0 # {a="b",foo="bar # bar"} 0.5 a_bucket{le="2.0",foo="bar # "} 2 # {a="c",foo="bar # bar"} 0.5 a_bucket{le="+Inf",foo="bar # "} 3 # {a="d",foo="bar # bar"} 4 # EOF """) hfm = HistogramMetricFamily("a", "help") hfm.add_sample("a_bucket", {"le": "1.0", "foo": "bar # "}, 0.0, None, Exemplar({"a": "b", "foo": "bar # bar"}, 0.5)) hfm.add_sample("a_bucket", {"le": "2.0", "foo": "bar # "}, 2.0, None, Exemplar({"a": "c", "foo": "bar # bar"}, 0.5)) hfm.add_sample("a_bucket", {"le": "+Inf", "foo": "bar # "}, 3.0, None, Exemplar({"a": "d", "foo": "bar # bar"}, 4)) self.assertEqual([hfm], list(families)) @unittest.skipIf(sys.version_info < (3, 3), "Test requires Python 3.3+.") def test_fallback_to_state_machine_label_parsing(self): from unittest.mock import patch from prometheus_client.openmetrics.parser import _parse_sample parse_sample_function = "prometheus_client.openmetrics.parser._parse_sample" parse_labels_function = "prometheus_client.openmetrics.parser._parse_labels" 
parse_remaining_function = "prometheus_client.openmetrics.parser._parse_remaining_text" state_machine_function = "prometheus_client.openmetrics.parser._parse_labels_with_state_machine" parse_sample_return_value = Sample("a_total", {"foo": "foo # bar"}, 1) with patch(parse_sample_function, return_value=parse_sample_return_value) as mock: families = text_string_to_metric_families("""# TYPE a counter # HELP a help a_total{foo="foo # bar"} 1 # EOF """) a = CounterMetricFamily("a", "help", labels=["foo"]) a.add_metric(["foo # bar"], 1) self.assertEqual([a], list(families)) mock.assert_called_once_with('a_total{foo="foo # bar"} 1') # First fallback case state_machine_return_values = [{"foo": "foo # bar"}, len('foo="foo # bar"}')] parse_remaining_values = [1, None, None] with patch(parse_labels_function) as mock1: with patch(state_machine_function, return_value=state_machine_return_values) as mock2: with patch(parse_remaining_function, return_value=parse_remaining_values) as mock3: sample = _parse_sample('a_total{foo="foo # bar"} 1') s = Sample("a_total", {"foo": "foo # bar"}, 1) self.assertEqual(s, sample) mock1.assert_not_called() mock2.assert_called_once_with('foo="foo # bar"} 1') mock3.assert_called_once_with('1') # Second fallback case state_machine_return_values = [{"le": "1.0"}, len('le="1.0"}')] parse_remaining_values = [0.0, Timestamp(123, 0), Exemplar({"a": "b"}, 0.5)] with patch(parse_labels_function) as mock1: with patch(state_machine_function, return_value=state_machine_return_values) as mock2: with patch(parse_remaining_function, return_value=parse_remaining_values) as mock3: sample = _parse_sample('a_bucket{le="1.0"} 0 123 # {a="b"} 0.5') s = Sample("a_bucket", {"le": "1.0"}, 0.0, Timestamp(123, 0), Exemplar({"a": "b"}, 0.5)) self.assertEqual(s, sample) mock1.assert_not_called() mock2.assert_called_once_with('le="1.0"} 0 123 # {a="b"} 0.5') mock3.assert_called_once_with('0 123 # {a="b"} 0.5') # No need to fallback case parse_labels_return_values = {"foo": "foo#bar"} parse_remaining_values = [1, None, None] with patch(parse_labels_function, return_value=parse_labels_return_values) as mock1: with patch(state_machine_function) as mock2: with patch(parse_remaining_function, return_value=parse_remaining_values) as mock3: sample = _parse_sample('a_total{foo="foo#bar"} 1') s = Sample("a_total", {"foo": "foo#bar"}, 1) self.assertEqual(s, sample) mock1.assert_called_once_with('foo="foo#bar"') mock2.assert_not_called() mock3.assert_called_once_with('1') @unittest.skipIf(sys.version_info < (2, 7), "Test requires Python 2.7+.") def test_roundtrip(self): text = """# HELP go_gc_duration_seconds A summary of the GC invocation durations. # TYPE go_gc_duration_seconds summary go_gc_duration_seconds{quantile="0.0"} 0.013300656000000001 go_gc_duration_seconds{quantile="0.25"} 0.013638736 go_gc_duration_seconds{quantile="0.5"} 0.013759906 go_gc_duration_seconds{quantile="0.75"} 0.013962066 go_gc_duration_seconds{quantile="1.0"} 0.021383540000000003 go_gc_duration_seconds_sum 56.12904785 go_gc_duration_seconds_count 7476.0 # HELP go_goroutines Number of goroutines that currently exist. # TYPE go_goroutines gauge go_goroutines 166.0 # HELP prometheus_local_storage_indexing_batch_duration_milliseconds Quantiles for batch indexing duration in milliseconds. 
# TYPE prometheus_local_storage_indexing_batch_duration_milliseconds summary prometheus_local_storage_indexing_batch_duration_milliseconds{quantile="0.5"} NaN prometheus_local_storage_indexing_batch_duration_milliseconds{quantile="0.9"} NaN prometheus_local_storage_indexing_batch_duration_milliseconds{quantile="0.99"} NaN prometheus_local_storage_indexing_batch_duration_milliseconds_sum 871.5665949999999 prometheus_local_storage_indexing_batch_duration_milliseconds_count 229.0 # HELP process_cpu_seconds Total user and system CPU time spent in seconds. # TYPE process_cpu_seconds counter process_cpu_seconds_total 29323.4 # HELP process_virtual_memory_bytes Virtual memory size in bytes. # TYPE process_virtual_memory_bytes gauge process_virtual_memory_bytes 2.478268416e+09 # HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, and branch from which Prometheus was built. # TYPE prometheus_build_info gauge prometheus_build_info{branch="HEAD",revision="ef176e5",version="0.16.0rc1"} 1.0 # HELP prometheus_local_storage_chunk_ops The total number of chunk operations by their type. # TYPE prometheus_local_storage_chunk_ops counter prometheus_local_storage_chunk_ops_total{type="clone"} 28.0 prometheus_local_storage_chunk_ops_total{type="create"} 997844.0 prometheus_local_storage_chunk_ops_total{type="drop"} 1.345758e+06 prometheus_local_storage_chunk_ops_total{type="load"} 1641.0 prometheus_local_storage_chunk_ops_total{type="persist"} 981408.0 prometheus_local_storage_chunk_ops_total{type="pin"} 32662.0 prometheus_local_storage_chunk_ops_total{type="transcode"} 980180.0 prometheus_local_storage_chunk_ops_total{type="unpin"} 32662.0 # HELP foo histogram Testing histogram buckets # TYPE foo histogram foo_bucket{le="0.0"} 0.0 foo_bucket{le="1e-05"} 0.0 foo_bucket{le="0.0001"} 0.0 foo_bucket{le="0.1"} 8.0 foo_bucket{le="1.0"} 10.0 foo_bucket{le="10.0"} 17.0 foo_bucket{le="100000.0"} 17.0 foo_bucket{le="1e+06"} 17.0 foo_bucket{le="1.55555555555552e+06"} 17.0 foo_bucket{le="1e+23"} 17.0 foo_bucket{le="+Inf"} 17.0 foo_count 17.0 foo_sum 324789.3 foo_created 1.520430000123e+09 # EOF """ families = list(text_string_to_metric_families(text)) class TextCollector(object): def collect(self): return families registry = CollectorRegistry() registry.register(TextCollector()) self.assertEqual(text.encode('utf-8'), generate_latest(registry)) def test_invalid_input(self): for case in [ # No EOF. (''), # Text after EOF. ('a 1\n# EOF\nblah'), ('a 1\n# EOFblah'), # Missing or wrong quotes on label value. ('a{a=1} 1\n# EOF\n'), ('a{a="1} 1\n# EOF\n'), ('a{a=\'1\'} 1\n# EOF\n'), # Missing equal or label value. ('a{a} 1\n# EOF\n'), ('a{a"value"} 1\n# EOF\n'), ('a{a""} 1\n# EOF\n'), ('a{a=} 1\n# EOF\n'), ('a{a="} 1\n# EOF\n'), # Missing or extra commas. ('a{a="1"b="2"} 1\n# EOF\n'), ('a{a="1",,b="2"} 1\n# EOF\n'), ('a{a="1",b="2",} 1\n# EOF\n'), # Missing value. ('a\n# EOF\n'), ('a \n# EOF\n'), # Bad HELP. ('# HELP\n# EOF\n'), ('# HELP \n# EOF\n'), ('# HELP a\n# EOF\n'), ('# HELP a\t\n# EOF\n'), (' # HELP a meh\n# EOF\n'), # Bad TYPE. ('# TYPE\n# EOF\n'), ('# TYPE \n# EOF\n'), ('# TYPE a\n# EOF\n'), ('# TYPE a\t\n# EOF\n'), ('# TYPE a meh\n# EOF\n'), ('# TYPE a meh \n# EOF\n'), ('# TYPE a gauge \n# EOF\n'), ('# TYPE a untyped\n# EOF\n'), # Bad UNIT. 
('# UNIT\n# EOF\n'), ('# UNIT \n# EOF\n'), ('# UNIT a\n# EOF\n'), ('# UNIT a\t\n# EOF\n'), ('# UNIT a seconds\n# EOF\n'), ('# UNIT a_seconds seconds \n# EOF\n'), ('# TYPE x_u info\n# UNIT x_u u\n# EOF\n'), ('# TYPE x_u stateset\n# UNIT x_u u\n# EOF\n'), # Metadata in wrong place. ('# HELP a x\na 1\n# TYPE a gauge\n# EOF\n'), ('# TYPE a gauge\na 1\n# HELP a gauge\n# EOF\n'), ('# TYPE a_s gauge\na_s 1\n# UNIT a_s s\n# EOF\n'), # Repeated metadata. ('# HELP a \n# HELP a \n# EOF\n'), ('# HELP a x\n# HELP a x\n# EOF\n'), ('# TYPE a untyped\n# TYPE a untyped\n# EOF\n'), ('# UNIT a_s s\n# UNIT a_s s\n# EOF\n'), # Bad metric names. ('0a 1\n# EOF\n'), ('a.b 1\n# EOF\n'), ('a-b 1\n# EOF\n'), # Bad value. ('a a\n# EOF\n'), ('a 1\n# EOF\n'), ('a 1\t\n# EOF\n'), ('a 1 \n# EOF\n'), # Bad timestamp. ('a 1 z\n# EOF\n'), ('a 1 1z\n# EOF\n'), ('a 1 1.1.1\n# EOF\n'), ('a 1 NaN\n# EOF\n'), ('a 1 Inf\n# EOF\n'), ('a 1 +Inf\n# EOF\n'), ('a 1 -Inf\n# EOF\n'), # Bad exemplars. ('# TYPE a histogram\na_bucket{le="+Inf"} 1 #\n# EOF\n'), ('# TYPE a histogram\na_bucket{le="+Inf"} 1# {} 1\n# EOF\n'), ('# TYPE a histogram\na_bucket{le="+Inf"} 1 #{} 1\n# EOF\n'), ('# TYPE a histogram\na_bucket{le="+Inf"} 1 # {}1\n# EOF\n'), ('# TYPE a histogram\na_bucket{le="+Inf"} 1 # {} 1 \n# EOF\n'), ('# TYPE a histogram\na_bucket{le="+Inf"} 1 # {} 1 1 \n# EOF\n'), ('# TYPE a histogram\na_bucket{le="+Inf"} 1 # ' '{a="2345678901234567890123456789012345678901234567890123456789012345"} 1 1\n# EOF\n'), # Exemplars on unallowed samples. ('# TYPE a histogram\na_sum 1 # {a="b"} 0.5\n# EOF\n'), ('# TYPE a gaugehistogram\na_sum 1 # {a="b"} 0.5\n# EOF\n'), ('# TYPE a_bucket gauge\na_bucket 1 # {a="b"} 0.5\n# EOF\n'), # Exemplars on unallowed metric types. ('# TYPE a counter\na_total 1 # {a="b"} 1\n# EOF\n'), ('# TYPE a gauge\na 1 # {a="b"} 1\n# EOF\n'), # Bad stateset/info values. ('# TYPE a stateset\na 2\n# EOF\n'), ('# TYPE a info\na 2\n# EOF\n'), ('# TYPE a stateset\na 2.0\n# EOF\n'), ('# TYPE a info\na 2.0\n# EOF\n'), # Missing or invalid labels for a type. ('# TYPE a summary\na 0\n# EOF\n'), ('# TYPE a summary\na{quantile="-1"} 0\n# EOF\n'), ('# TYPE a summary\na{quantile="foo"} 0\n# EOF\n'), ('# TYPE a summary\na{quantile="1.01"} 0\n# EOF\n'), ('# TYPE a summary\na{quantile="NaN"} 0\n# EOF\n'), ('# TYPE a summary\na{quantile="1"} 0\n# EOF\n'), ('# TYPE a histogram\na_bucket 0\n# EOF\n'), ('# TYPE a gaugehistogram\na_bucket 0\n# EOF\n'), ('# TYPE a stateset\na 0\n# EOF\n'), # Bad counter values. ('# TYPE a counter\na_total NaN\n# EOF\n'), ('# TYPE a counter\na_total -1\n# EOF\n'), ('# TYPE a histogram\na_sum NaN\n# EOF\n'), ('# TYPE a histogram\na_count NaN\n# EOF\n'), ('# TYPE a histogram\na_bucket{le="+Inf"} NaN\n# EOF\n'), ('# TYPE a histogram\na_sum -1\n# EOF\n'), ('# TYPE a histogram\na_count -1\n# EOF\n'), ('# TYPE a histogram\na_bucket{le="+Inf"} -1\n# EOF\n'), ('# TYPE a gaugehistogram\na_bucket{le="+Inf"} NaN\n# EOF\n'), ('# TYPE a gaugehistogram\na_bucket{le="+Inf"} -1\na_gcount -1\n# EOF\n'), ('# TYPE a gaugehistogram\na_bucket{le="+Inf"} -1\n# EOF\n'), ('# TYPE a gaugehistogram\na_bucket{le="+Inf"} 1\na_gsum -1\n# EOF\n'), ('# TYPE a gaugehistogram\na_bucket{le="+Inf"} 1\na_gsum NaN\n# EOF\n'), ('# TYPE a summary\na_sum NaN\n# EOF\n'), ('# TYPE a summary\na_count NaN\n# EOF\n'), ('# TYPE a summary\na_sum -1\n# EOF\n'), ('# TYPE a summary\na_count -1\n# EOF\n'), ('# TYPE a summary\na{quantile="0.5"} -1\n# EOF\n'), # Bad histograms. 
('# TYPE a histogram\na_sum 1\n# EOF\n'), ('# TYPE a gaugehistogram\na_gsum 1\n# EOF\n'), ('# TYPE a histogram\na_count 1\na_bucket{le="+Inf"} 0\n# EOF\n'), ('# TYPE a histogram\na_bucket{le="+Inf"} 0\na_count 1\n# EOF\n'), ('# TYPE a histogram\na_bucket{le="1"} 0\na_bucket{le="+Inf"} 0\n# EOF\n'), ('# TYPE a histogram\na_bucket{le="1e-04"} 0\na_bucket{le="+Inf"} 0\n# EOF\n'), ('# TYPE a histogram\na_bucket{le="1e+05"} 0\na_bucket{le="+Inf"} 0\n# EOF\n'), ('# TYPE a histogram\na_bucket{le="+INF"} 0\n# EOF\n'), ('# TYPE a histogram\na_bucket{le="2"} 0\na_bucket{le="1"} 0\na_bucket{le="+Inf"} 0\n# EOF\n'), ('# TYPE a histogram\na_bucket{le="1"} 1\na_bucket{le="2"} 1\na_bucket{le="+Inf"} 0\n# EOF\n'), # Bad grouping or ordering. ('# TYPE a histogram\na_sum{a="1"} 0\na_sum{a="2"} 0\na_count{a="1"} 0\n# EOF\n'), ('# TYPE a histogram\na_bucket{a="1",le="1"} 0\na_bucket{a="2",le="+Inf""} ' '0\na_bucket{a="1",le="+Inf"} 0\n# EOF\n'), ('# TYPE a gaugehistogram\na_gsum{a="1"} 0\na_gsum{a="2"} 0\na_gcount{a="1"} 0\n# EOF\n'), ('# TYPE a summary\nquantile{quantile="0"} 0\na_sum{a="1"} 0\nquantile{quantile="1"} 0\n# EOF\n'), ('# TYPE a gauge\na 0 -1\na 0 -2\n# EOF\n'), ('# TYPE a gauge\na 0 -1\na 0 -1.1\n# EOF\n'), ('# TYPE a gauge\na 0 1\na 0 -1\n# EOF\n'), ('# TYPE a gauge\na 0 1.1\na 0 1\n# EOF\n'), ('# TYPE a gauge\na 0 1\na 0 0\n# EOF\n'), ('# TYPE a gauge\na 0\na 0 0\n# EOF\n'), ('# TYPE a gauge\na 0 0\na 0\n# EOF\n'), ]: with self.assertRaises(ValueError): list(text_string_to_metric_families(case)) @unittest.skipIf(sys.version_info < (2, 7), "float repr changed from 2.6 to 2.7") def test_invalid_float_input(self): for case in [ # Bad histograms. ('# TYPE a histogram\na_bucket{le="9.999999999999999e+22"} 0\na_bucket{le="+Inf"} 0\n# EOF\n'), ('# TYPE a histogram\na_bucket{le="1.5555555555555201e+06"} 0\na_bucket{le="+Inf"} 0\n# EOF\n'), ]: with self.assertRaises(ValueError): list(text_string_to_metric_families(case)) if __name__ == '__main__': unittest.main() python-prometheus-client-0.7.1/tests/proc/000077500000000000000000000000001350270547000206155ustar00rootroot00000000000000python-prometheus-client-0.7.1/tests/proc/26231/000077500000000000000000000000001350270547000212725ustar00rootroot00000000000000python-prometheus-client-0.7.1/tests/proc/26231/fd/000077500000000000000000000000001350270547000216635ustar00rootroot00000000000000python-prometheus-client-0.7.1/tests/proc/26231/fd/0000066400000000000000000000000001350270547000217330ustar00rootroot00000000000000python-prometheus-client-0.7.1/tests/proc/26231/fd/1000066400000000000000000000000001350270547000217340ustar00rootroot00000000000000python-prometheus-client-0.7.1/tests/proc/26231/fd/2000066400000000000000000000000001350270547000217350ustar00rootroot00000000000000python-prometheus-client-0.7.1/tests/proc/26231/fd/3000066400000000000000000000000001350270547000217360ustar00rootroot00000000000000python-prometheus-client-0.7.1/tests/proc/26231/fd/4000066400000000000000000000000001350270547000217370ustar00rootroot00000000000000python-prometheus-client-0.7.1/tests/proc/26231/limits000066400000000000000000000021051350270547000225140ustar00rootroot00000000000000Limit Soft Limit Hard Limit Units Max cpu time unlimited unlimited seconds Max file size unlimited unlimited bytes Max data size unlimited unlimited bytes Max stack size 8388608 unlimited bytes Max core file size 0 unlimited bytes Max resident set unlimited unlimited bytes Max processes 62898 62898 processes Max open files 2048 4096 files Max locked memory 65536 65536 bytes Max address 
space unlimited unlimited bytes Max file locks unlimited unlimited locks Max pending signals 62898 62898 signals Max msgqueue size 819200 819200 bytes Max nice priority 0 0 python-prometheus-client-0.7.1/tests/proc/26231/stat000066400000000000000000000005121350270547000221660ustar00rootroot0000000000000026231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 python-prometheus-client-0.7.1/tests/proc/584/000077500000000000000000000000001350270547000211355ustar00rootroot00000000000000python-prometheus-client-0.7.1/tests/proc/584/stat000066400000000000000000000004721350270547000220360ustar00rootroot000000000000001020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 python-prometheus-client-0.7.1/tests/proc/stat000066400000000000000000000040561350270547000215200ustar00rootroot00000000000000cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 cpu1 47869 23 16474 1110787 591 0 46 0 0 0 cpu2 46504 36 15916 1112321 441 0 326 0 0 0 cpu3 47054 102 15683 1113230 533 0 60 0 0 0 cpu4 28413 25 10776 1140321 217 0 8 0 0 0 cpu5 29271 101 11586 1136270 672 0 30 0 0 0 cpu6 29152 36 10276 1139721 319 0 29 0 0 0 cpu7 29098 268 10164 1139282 555 0 31 0 0 0 intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 00 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 ctxt 38014093 btime 1418183276 processes 26442 procs_running 2 procs_blocked 0 softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 
python-prometheus-client-0.7.1/tests/test_core.py000066400000000000000000000757141350270547000222310ustar00rootroot00000000000000from __future__ import unicode_literals from concurrent.futures import ThreadPoolExecutor import inspect import time import pytest from prometheus_client.core import ( CollectorRegistry, Counter, CounterMetricFamily, Enum, Gauge, GaugeHistogramMetricFamily, GaugeMetricFamily, Histogram, HistogramMetricFamily, Info, InfoMetricFamily, Metric, Sample, StateSetMetricFamily, Summary, SummaryMetricFamily, UntypedMetricFamily, ) try: import unittest2 as unittest except ImportError: import unittest class TestCounter(unittest.TestCase): def setUp(self): self.registry = CollectorRegistry() self.counter = Counter('c_total', 'help', registry=self.registry) def test_increment(self): self.assertEqual(0, self.registry.get_sample_value('c_total')) self.counter.inc() self.assertEqual(1, self.registry.get_sample_value('c_total')) self.counter.inc(7) self.assertEqual(8, self.registry.get_sample_value('c_total')) def test_negative_increment_raises(self): self.assertRaises(ValueError, self.counter.inc, -1) def test_function_decorator(self): @self.counter.count_exceptions(ValueError) def f(r): if r: raise ValueError else: raise TypeError self.assertEqual((["r"], None, None, None), inspect.getargspec(f)) try: f(False) except TypeError: pass self.assertEqual(0, self.registry.get_sample_value('c_total')) try: f(True) except ValueError: pass self.assertEqual(1, self.registry.get_sample_value('c_total')) def test_block_decorator(self): with self.counter.count_exceptions(): pass self.assertEqual(0, self.registry.get_sample_value('c_total')) raised = False try: with self.counter.count_exceptions(): raise ValueError except: raised = True self.assertTrue(raised) self.assertEqual(1, self.registry.get_sample_value('c_total')) class TestGauge(unittest.TestCase): def setUp(self): self.registry = CollectorRegistry() self.gauge = Gauge('g', 'help', registry=self.registry) def test_gauge(self): self.assertEqual(0, self.registry.get_sample_value('g')) self.gauge.inc() self.assertEqual(1, self.registry.get_sample_value('g')) self.gauge.dec(3) self.assertEqual(-2, self.registry.get_sample_value('g')) self.gauge.set(9) self.assertEqual(9, self.registry.get_sample_value('g')) def test_inprogress_function_decorator(self): self.assertEqual(0, self.registry.get_sample_value('g')) @self.gauge.track_inprogress() def f(): self.assertEqual(1, self.registry.get_sample_value('g')) self.assertEqual(([], None, None, None), inspect.getargspec(f)) f() self.assertEqual(0, self.registry.get_sample_value('g')) def test_inprogress_block_decorator(self): self.assertEqual(0, self.registry.get_sample_value('g')) with self.gauge.track_inprogress(): self.assertEqual(1, self.registry.get_sample_value('g')) self.assertEqual(0, self.registry.get_sample_value('g')) def test_gauge_function(self): x = {} self.gauge.set_function(lambda: len(x)) self.assertEqual(0, self.registry.get_sample_value('g')) self.gauge.inc() self.assertEqual(0, self.registry.get_sample_value('g')) x['a'] = None self.assertEqual(1, self.registry.get_sample_value('g')) def test_time_function_decorator(self): self.assertEqual(0, self.registry.get_sample_value('g')) @self.gauge.time() def f(): time.sleep(.001) self.assertEqual(([], None, None, None), inspect.getargspec(f)) f() self.assertNotEqual(0, self.registry.get_sample_value('g')) def test_function_decorator_multithread(self): self.assertEqual(0, self.registry.get_sample_value('g')) workers = 2 pool = 
ThreadPoolExecutor(max_workers=workers) @self.gauge.time() def f(duration): time.sleep(duration) expected_duration = 1 pool.submit(f, expected_duration) time.sleep(0.7 * expected_duration) pool.submit(f, expected_duration * 2) time.sleep(expected_duration) rounding_coefficient = 0.9 adjusted_expected_duration = expected_duration * rounding_coefficient self.assertLess(adjusted_expected_duration, self.registry.get_sample_value('g')) pool.shutdown(wait=True) def test_time_block_decorator(self): self.assertEqual(0, self.registry.get_sample_value('g')) with self.gauge.time(): time.sleep(.001) self.assertNotEqual(0, self.registry.get_sample_value('g')) class TestSummary(unittest.TestCase): def setUp(self): self.registry = CollectorRegistry() self.summary = Summary('s', 'help', registry=self.registry) def test_summary(self): self.assertEqual(0, self.registry.get_sample_value('s_count')) self.assertEqual(0, self.registry.get_sample_value('s_sum')) self.summary.observe(10) self.assertEqual(1, self.registry.get_sample_value('s_count')) self.assertEqual(10, self.registry.get_sample_value('s_sum')) def test_function_decorator(self): self.assertEqual(0, self.registry.get_sample_value('s_count')) @self.summary.time() def f(): pass self.assertEqual(([], None, None, None), inspect.getargspec(f)) f() self.assertEqual(1, self.registry.get_sample_value('s_count')) def test_function_decorator_multithread(self): self.assertEqual(0, self.registry.get_sample_value('s_count')) summary2 = Summary('s2', 'help', registry=self.registry) workers = 3 duration = 0.1 pool = ThreadPoolExecutor(max_workers=workers) @self.summary.time() def f(): time.sleep(duration / 2) # Testing that different instances of timer do not interfere summary2.time()(lambda: time.sleep(duration / 2))() jobs = workers * 3 for i in range(jobs): pool.submit(f) pool.shutdown(wait=True) self.assertEqual(jobs, self.registry.get_sample_value('s_count')) rounding_coefficient = 0.9 total_expected_duration = jobs * duration * rounding_coefficient self.assertLess(total_expected_duration, self.registry.get_sample_value('s_sum')) self.assertLess(total_expected_duration / 2, self.registry.get_sample_value('s2_sum')) def test_function_decorator_reentrancy(self): self.assertEqual(0, self.registry.get_sample_value('s_count')) iterations = 2 sleep = 0.1 @self.summary.time() def f(i=1): time.sleep(sleep) if i == iterations: return f(i + 1) f() self.assertEqual(iterations, self.registry.get_sample_value('s_count')) # Arithmetic series with d == a_1 total_expected_duration = sleep * (iterations ** 2 + iterations) / 2 rounding_coefficient = 0.9 total_expected_duration *= rounding_coefficient self.assertLess(total_expected_duration, self.registry.get_sample_value('s_sum')) def test_block_decorator(self): self.assertEqual(0, self.registry.get_sample_value('s_count')) with self.summary.time(): pass self.assertEqual(1, self.registry.get_sample_value('s_count')) class TestHistogram(unittest.TestCase): def setUp(self): self.registry = CollectorRegistry() self.histogram = Histogram('h', 'help', registry=self.registry) self.labels = Histogram('hl', 'help', ['l'], registry=self.registry) def test_histogram(self): self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '1.0'})) self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '2.5'})) self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '5.0'})) self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '+Inf'})) self.assertEqual(0, 
self.registry.get_sample_value('h_count')) self.assertEqual(0, self.registry.get_sample_value('h_sum')) self.histogram.observe(2) self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '1.0'})) self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '2.5'})) self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '5.0'})) self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '+Inf'})) self.assertEqual(1, self.registry.get_sample_value('h_count')) self.assertEqual(2, self.registry.get_sample_value('h_sum')) self.histogram.observe(2.5) self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '1.0'})) self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '2.5'})) self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '5.0'})) self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '+Inf'})) self.assertEqual(2, self.registry.get_sample_value('h_count')) self.assertEqual(4.5, self.registry.get_sample_value('h_sum')) self.histogram.observe(float("inf")) self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '1.0'})) self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '2.5'})) self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '5.0'})) self.assertEqual(3, self.registry.get_sample_value('h_bucket', {'le': '+Inf'})) self.assertEqual(3, self.registry.get_sample_value('h_count')) self.assertEqual(float("inf"), self.registry.get_sample_value('h_sum')) def test_setting_buckets(self): h = Histogram('h', 'help', registry=None, buckets=[0, 1, 2]) self.assertEqual([0.0, 1.0, 2.0, float("inf")], h._upper_bounds) h = Histogram('h', 'help', registry=None, buckets=[0, 1, 2, float("inf")]) self.assertEqual([0.0, 1.0, 2.0, float("inf")], h._upper_bounds) self.assertRaises(ValueError, Histogram, 'h', 'help', registry=None, buckets=[]) self.assertRaises(ValueError, Histogram, 'h', 'help', registry=None, buckets=[float("inf")]) self.assertRaises(ValueError, Histogram, 'h', 'help', registry=None, buckets=[3, 1]) def test_labels(self): self.assertRaises(ValueError, Histogram, 'h', 'help', registry=None, labelnames=['le']) self.labels.labels('a').observe(2) self.assertEqual(0, self.registry.get_sample_value('hl_bucket', {'le': '1.0', 'l': 'a'})) self.assertEqual(1, self.registry.get_sample_value('hl_bucket', {'le': '2.5', 'l': 'a'})) self.assertEqual(1, self.registry.get_sample_value('hl_bucket', {'le': '5.0', 'l': 'a'})) self.assertEqual(1, self.registry.get_sample_value('hl_bucket', {'le': '+Inf', 'l': 'a'})) self.assertEqual(1, self.registry.get_sample_value('hl_count', {'l': 'a'})) self.assertEqual(2, self.registry.get_sample_value('hl_sum', {'l': 'a'})) def test_function_decorator(self): self.assertEqual(0, self.registry.get_sample_value('h_count')) self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '+Inf'})) @self.histogram.time() def f(): pass self.assertEqual(([], None, None, None), inspect.getargspec(f)) f() self.assertEqual(1, self.registry.get_sample_value('h_count')) self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '+Inf'})) def test_function_decorator_multithread(self): self.assertEqual(0, self.registry.get_sample_value('h_count')) workers = 3 duration = 0.1 pool = ThreadPoolExecutor(max_workers=workers) @self.histogram.time() def f(): time.sleep(duration) jobs = workers * 3 for i in range(jobs): pool.submit(f) pool.shutdown(wait=True) self.assertEqual(jobs, self.registry.get_sample_value('h_count')) 
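# The 0.9 coefficient below deliberately loosens the check: sleep() and thread scheduling make the measured durations imprecise, so the test asserts only a lower bound on the observed sum rather than an exact total.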
rounding_coefficient = 0.9 total_expected_duration = jobs * duration * rounding_coefficient self.assertLess(total_expected_duration, self.registry.get_sample_value('h_sum')) def test_block_decorator(self): self.assertEqual(0, self.registry.get_sample_value('h_count')) self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '+Inf'})) with self.histogram.time(): pass self.assertEqual(1, self.registry.get_sample_value('h_count')) self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '+Inf'})) class TestInfo(unittest.TestCase): def setUp(self): self.registry = CollectorRegistry() self.info = Info('i', 'help', registry=self.registry) self.labels = Info('il', 'help', ['l'], registry=self.registry) def test_info(self): self.assertEqual(1, self.registry.get_sample_value('i_info', {})) self.info.info({'a': 'b', 'c': 'd'}) self.assertEqual(None, self.registry.get_sample_value('i_info', {})) self.assertEqual(1, self.registry.get_sample_value('i_info', {'a': 'b', 'c': 'd'})) def test_labels(self): self.assertRaises(ValueError, self.labels.labels('a').info, {'l': ''}) self.labels.labels('a').info({'foo': 'bar'}) self.assertEqual(1, self.registry.get_sample_value('il_info', {'l': 'a', 'foo': 'bar'})) class TestEnum(unittest.TestCase): def setUp(self): self.registry = CollectorRegistry() self.enum = Enum('e', 'help', states=['a', 'b', 'c'], registry=self.registry) self.labels = Enum('el', 'help', ['l'], states=['a', 'b', 'c'], registry=self.registry) def test_enum(self): self.assertEqual(1, self.registry.get_sample_value('e', {'e': 'a'})) self.assertEqual(0, self.registry.get_sample_value('e', {'e': 'b'})) self.assertEqual(0, self.registry.get_sample_value('e', {'e': 'c'})) self.enum.state('b') self.assertEqual(0, self.registry.get_sample_value('e', {'e': 'a'})) self.assertEqual(1, self.registry.get_sample_value('e', {'e': 'b'})) self.assertEqual(0, self.registry.get_sample_value('e', {'e': 'c'})) self.assertRaises(ValueError, self.enum.state, 'd') self.assertRaises(ValueError, Enum, 'e', 'help', registry=None) def test_labels(self): self.labels.labels('a').state('c') self.assertEqual(0, self.registry.get_sample_value('el', {'l': 'a', 'el': 'a'})) self.assertEqual(0, self.registry.get_sample_value('el', {'l': 'a', 'el': 'b'})) self.assertEqual(1, self.registry.get_sample_value('el', {'l': 'a', 'el': 'c'})) def test_overlapping_labels(self): with pytest.raises(ValueError): Enum('e', 'help', registry=None, labelnames=['e']) class TestMetricWrapper(unittest.TestCase): def setUp(self): self.registry = CollectorRegistry() self.counter = Counter('c_total', 'help', labelnames=['l'], registry=self.registry) self.two_labels = Counter('two', 'help', labelnames=['a', 'b'], registry=self.registry) def test_child(self): self.counter.labels('x').inc() self.assertEqual(1, self.registry.get_sample_value('c_total', {'l': 'x'})) self.two_labels.labels('x', 'y').inc(2) self.assertEqual(2, self.registry.get_sample_value('two_total', {'a': 'x', 'b': 'y'})) def test_remove(self): self.counter.labels('x').inc() self.counter.labels('y').inc(2) self.assertEqual(1, self.registry.get_sample_value('c_total', {'l': 'x'})) self.assertEqual(2, self.registry.get_sample_value('c_total', {'l': 'y'})) self.counter.remove('x') self.assertEqual(None, self.registry.get_sample_value('c_total', {'l': 'x'})) self.assertEqual(2, self.registry.get_sample_value('c_total', {'l': 'y'})) def test_incorrect_label_count_raises(self): self.assertRaises(ValueError, self.counter.labels) self.assertRaises(ValueError, 
self.counter.labels, 'a', 'b') self.assertRaises(ValueError, self.counter.remove) self.assertRaises(ValueError, self.counter.remove, 'a', 'b') def test_labels_on_labels(self): with pytest.raises(ValueError): self.counter.labels('a').labels('b') def test_labels_coerced_to_string(self): self.counter.labels(None).inc() self.counter.labels(l=None).inc() self.assertEqual(2, self.registry.get_sample_value('c_total', {'l': 'None'})) self.counter.remove(None) self.assertEqual(None, self.registry.get_sample_value('c_total', {'l': 'None'})) def test_non_string_labels_raises(self): class Test(object): __str__ = None self.assertRaises(TypeError, self.counter.labels, Test()) self.assertRaises(TypeError, self.counter.labels, l=Test()) def test_namespace_subsystem_concatenated(self): c = Counter('c_total', 'help', namespace='a', subsystem='b', registry=self.registry) c.inc() self.assertEqual(1, self.registry.get_sample_value('a_b_c_total')) def test_labels_by_kwarg(self): self.counter.labels(l='x').inc() self.assertEqual(1, self.registry.get_sample_value('c_total', {'l': 'x'})) self.assertRaises(ValueError, self.counter.labels, l='x', m='y') self.assertRaises(ValueError, self.counter.labels, m='y') self.assertRaises(ValueError, self.counter.labels) self.two_labels.labels(a='x', b='y').inc() self.assertEqual(1, self.registry.get_sample_value('two_total', {'a': 'x', 'b': 'y'})) self.assertRaises(ValueError, self.two_labels.labels, a='x', b='y', c='z') self.assertRaises(ValueError, self.two_labels.labels, a='x', c='z') self.assertRaises(ValueError, self.two_labels.labels, b='y', c='z') self.assertRaises(ValueError, self.two_labels.labels, c='z') self.assertRaises(ValueError, self.two_labels.labels) self.assertRaises(ValueError, self.two_labels.labels, {'a': 'x'}, b='y') def test_invalid_names_raise(self): self.assertRaises(ValueError, Counter, '', 'help') self.assertRaises(ValueError, Counter, '^', 'help') self.assertRaises(ValueError, Counter, '', 'help', namespace='&') self.assertRaises(ValueError, Counter, '', 'help', subsystem='(') self.assertRaises(ValueError, Counter, 'c_total', '', labelnames=['^']) self.assertRaises(ValueError, Counter, 'c_total', '', labelnames=['a:b']) self.assertRaises(ValueError, Counter, 'c_total', '', labelnames=['__reserved']) self.assertRaises(ValueError, Summary, 'c_total', '', labelnames=['quantile']) def test_empty_labels_list(self): Histogram('h', 'help', [], registry=self.registry) self.assertEqual(0, self.registry.get_sample_value('h_sum')) def test_unit_appended(self): Histogram('h', 'help', [], registry=self.registry, unit="seconds") self.assertEqual(0, self.registry.get_sample_value('h_seconds_sum')) def test_unit_notappended(self): Histogram('h_seconds', 'help', [], registry=self.registry, unit="seconds") self.assertEqual(0, self.registry.get_sample_value('h_seconds_sum')) def test_no_units_for_info_enum(self): self.assertRaises(ValueError, Info, 'foo', 'help', unit="x") self.assertRaises(ValueError, Enum, 'foo', 'help', unit="x") class TestMetricFamilies(unittest.TestCase): def setUp(self): self.registry = CollectorRegistry() def custom_collector(self, metric_family): class CustomCollector(object): def collect(self): return [metric_family] self.registry.register(CustomCollector()) def test_untyped(self): self.custom_collector(UntypedMetricFamily('u', 'help', value=1)) self.assertEqual(1, self.registry.get_sample_value('u', {})) def test_untyped_labels(self): cmf = UntypedMetricFamily('u', 'help', labels=['a', 'c']) cmf.add_metric(['b', 'd'], 2) 
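# custom_collector() (defined above) registers a throwaway collector that yields this family; that is how hand-built metric families get exposed through the registry in these tests.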
self.custom_collector(cmf) self.assertEqual(2, self.registry.get_sample_value('u', {'a': 'b', 'c': 'd'})) def test_untyped_unit(self): self.custom_collector(UntypedMetricFamily('u', 'help', value=1, unit='unit')) self.assertEqual(1, self.registry.get_sample_value('u_unit', {})) def test_counter(self): self.custom_collector(CounterMetricFamily('c_total', 'help', value=1)) self.assertEqual(1, self.registry.get_sample_value('c_total', {})) def test_counter_total(self): self.custom_collector(CounterMetricFamily('c_total', 'help', value=1)) self.assertEqual(1, self.registry.get_sample_value('c_total', {})) def test_counter_labels(self): cmf = CounterMetricFamily('c_total', 'help', labels=['a', 'c_total']) cmf.add_metric(['b', 'd'], 2) self.custom_collector(cmf) self.assertEqual(2, self.registry.get_sample_value('c_total', {'a': 'b', 'c_total': 'd'})) def test_gauge(self): self.custom_collector(GaugeMetricFamily('g', 'help', value=1)) self.assertEqual(1, self.registry.get_sample_value('g', {})) def test_gauge_labels(self): cmf = GaugeMetricFamily('g', 'help', labels=['a']) cmf.add_metric(['b'], 2) self.custom_collector(cmf) self.assertEqual(2, self.registry.get_sample_value('g', {'a': 'b'})) def test_summary(self): self.custom_collector(SummaryMetricFamily('s', 'help', count_value=1, sum_value=2)) self.assertEqual(1, self.registry.get_sample_value('s_count', {})) self.assertEqual(2, self.registry.get_sample_value('s_sum', {})) def test_summary_labels(self): cmf = SummaryMetricFamily('s', 'help', labels=['a']) cmf.add_metric(['b'], count_value=1, sum_value=2) self.custom_collector(cmf) self.assertEqual(1, self.registry.get_sample_value('s_count', {'a': 'b'})) self.assertEqual(2, self.registry.get_sample_value('s_sum', {'a': 'b'})) def test_histogram(self): self.custom_collector(HistogramMetricFamily('h', 'help', buckets=[('0', 1), ('+Inf', 2)], sum_value=3)) self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '0'})) self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '+Inf'})) self.assertEqual(2, self.registry.get_sample_value('h_count', {})) self.assertEqual(3, self.registry.get_sample_value('h_sum', {})) def test_histogram_labels(self): cmf = HistogramMetricFamily('h', 'help', labels=['a']) cmf.add_metric(['b'], buckets=[('0', 1), ('+Inf', 2)], sum_value=3) self.custom_collector(cmf) self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'a': 'b', 'le': '0'})) self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'a': 'b', 'le': '+Inf'})) self.assertEqual(2, self.registry.get_sample_value('h_count', {'a': 'b'})) self.assertEqual(3, self.registry.get_sample_value('h_sum', {'a': 'b'})) def test_gaugehistogram(self): self.custom_collector(GaugeHistogramMetricFamily('h', 'help', buckets=[('0', 1), ('+Inf', 2)])) self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '0'})) self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '+Inf'})) def test_gaugehistogram_labels(self): cmf = GaugeHistogramMetricFamily('h', 'help', labels=['a']) cmf.add_metric(['b'], buckets=[('0', 1), ('+Inf', 2)], gsum_value=3) self.custom_collector(cmf) self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'a': 'b', 'le': '0'})) self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'a': 'b', 'le': '+Inf'})) self.assertEqual(2, self.registry.get_sample_value('h_gcount', {'a': 'b'})) self.assertEqual(3, self.registry.get_sample_value('h_gsum', {'a': 'b'})) def test_info(self): self.custom_collector(InfoMetricFamily('i', 
'help', value={'a': 'b'})) self.assertEqual(1, self.registry.get_sample_value('i_info', {'a': 'b'})) def test_info_labels(self): cmf = InfoMetricFamily('i', 'help', labels=['a']) cmf.add_metric(['b'], {'c': 'd'}) self.custom_collector(cmf) self.assertEqual(1, self.registry.get_sample_value('i_info', {'a': 'b', 'c': 'd'})) def test_stateset(self): self.custom_collector(StateSetMetricFamily('s', 'help', value={'a': True, 'b': True, })) self.assertEqual(1, self.registry.get_sample_value('s', {'s': 'a'})) self.assertEqual(1, self.registry.get_sample_value('s', {'s': 'b'})) def test_stateset_labels(self): cmf = StateSetMetricFamily('s', 'help', labels=['foo']) cmf.add_metric(['bar'], {'a': False, 'b': False, }) self.custom_collector(cmf) self.assertEqual(0, self.registry.get_sample_value('s', {'foo': 'bar', 's': 'a'})) self.assertEqual(0, self.registry.get_sample_value('s', {'foo': 'bar', 's': 'b'})) def test_bad_constructors(self): self.assertRaises(ValueError, UntypedMetricFamily, 'u', 'help', value=1, labels=[]) self.assertRaises(ValueError, UntypedMetricFamily, 'u', 'help', value=1, labels=['a']) self.assertRaises(ValueError, CounterMetricFamily, 'c_total', 'help', value=1, labels=[]) self.assertRaises(ValueError, CounterMetricFamily, 'c_total', 'help', value=1, labels=['a']) self.assertRaises(ValueError, GaugeMetricFamily, 'g', 'help', value=1, labels=[]) self.assertRaises(ValueError, GaugeMetricFamily, 'g', 'help', value=1, labels=['a']) self.assertRaises(ValueError, SummaryMetricFamily, 's', 'help', sum_value=1) self.assertRaises(ValueError, SummaryMetricFamily, 's', 'help', count_value=1) self.assertRaises(ValueError, SummaryMetricFamily, 's', 'help', count_value=1, labels=['a']) self.assertRaises(ValueError, SummaryMetricFamily, 's', 'help', sum_value=1, labels=['a']) self.assertRaises(ValueError, SummaryMetricFamily, 's', 'help', count_value=1, sum_value=1, labels=['a']) self.assertRaises(ValueError, HistogramMetricFamily, 'h', 'help', sum_value=1) self.assertRaises(ValueError, HistogramMetricFamily, 'h', 'help', buckets={}) self.assertRaises(ValueError, HistogramMetricFamily, 'h', 'help', sum_value=1, labels=['a']) self.assertRaises(ValueError, HistogramMetricFamily, 'h', 'help', buckets={}, labels=['a']) self.assertRaises(ValueError, HistogramMetricFamily, 'h', 'help', buckets={}, sum_value=1, labels=['a']) self.assertRaises(KeyError, HistogramMetricFamily, 'h', 'help', buckets={}, sum_value=1) self.assertRaises(ValueError, InfoMetricFamily, 'i', 'help', value={}, labels=[]) self.assertRaises(ValueError, InfoMetricFamily, 'i', 'help', value={}, labels=['a']) self.assertRaises(ValueError, StateSetMetricFamily, 's', 'help', value={'a': True}, labels=[]) self.assertRaises(ValueError, StateSetMetricFamily, 's', 'help', value={'a': True}, labels=['a']) def test_labelnames(self): cmf = UntypedMetricFamily('u', 'help', labels=iter(['a'])) self.assertEqual(('a',), cmf._labelnames) cmf = CounterMetricFamily('c_total', 'help', labels=iter(['a'])) self.assertEqual(('a',), cmf._labelnames) gmf = GaugeMetricFamily('g', 'help', labels=iter(['a'])) self.assertEqual(('a',), gmf._labelnames) smf = SummaryMetricFamily('s', 'help', labels=iter(['a'])) self.assertEqual(('a',), smf._labelnames) hmf = HistogramMetricFamily('h', 'help', labels=iter(['a'])) self.assertEqual(('a',), hmf._labelnames) class TestCollectorRegistry(unittest.TestCase): def test_duplicate_metrics_raises(self): registry = CollectorRegistry() Counter('c_total', 'help', registry=registry) self.assertRaises(ValueError, Counter, 
'c_total', 'help', registry=registry) self.assertRaises(ValueError, Gauge, 'c_total', 'help', registry=registry) self.assertRaises(ValueError, Gauge, 'c_created', 'help', registry=registry) Gauge('g_created', 'help', registry=registry) self.assertRaises(ValueError, Gauge, 'g_created', 'help', registry=registry) self.assertRaises(ValueError, Counter, 'g', 'help', registry=registry) Summary('s', 'help', registry=registry) self.assertRaises(ValueError, Summary, 's', 'help', registry=registry) self.assertRaises(ValueError, Gauge, 's_created', 'help', registry=registry) self.assertRaises(ValueError, Gauge, 's_sum', 'help', registry=registry) self.assertRaises(ValueError, Gauge, 's_count', 'help', registry=registry) # We don't currently expose quantiles, but let's prevent future # clashes anyway. self.assertRaises(ValueError, Gauge, 's', 'help', registry=registry) Histogram('h', 'help', registry=registry) self.assertRaises(ValueError, Histogram, 'h', 'help', registry=registry) # Clashes against various suffixes. self.assertRaises(ValueError, Summary, 'h', 'help', registry=registry) self.assertRaises(ValueError, Gauge, 'h_count', 'help', registry=registry) self.assertRaises(ValueError, Gauge, 'h_sum', 'help', registry=registry) self.assertRaises(ValueError, Gauge, 'h_bucket', 'help', registry=registry) self.assertRaises(ValueError, Gauge, 'h_created', 'help', registry=registry) # The name of the histogram itself isn't taken. Gauge('h', 'help', registry=registry) def test_unregister_works(self): registry = CollectorRegistry() s = Summary('s', 'help', registry=registry) self.assertRaises(ValueError, Gauge, 's_count', 'help', registry=registry) registry.unregister(s) Gauge('s_count', 'help', registry=registry) def custom_collector(self, metric_family, registry): class CustomCollector(object): def collect(self): return [metric_family] registry.register(CustomCollector()) def test_autodescribe_disabled_by_default(self): registry = CollectorRegistry() self.custom_collector(CounterMetricFamily('c_total', 'help', value=1), registry) self.custom_collector(CounterMetricFamily('c_total', 'help', value=1), registry) registry = CollectorRegistry(auto_describe=True) self.custom_collector(CounterMetricFamily('c_total', 'help', value=1), registry) self.assertRaises(ValueError, self.custom_collector, CounterMetricFamily('c_total', 'help', value=1), registry) def test_restricted_registry(self): registry = CollectorRegistry() Counter('c_total', 'help', registry=registry) Summary('s', 'help', registry=registry).observe(7) m = Metric('s', 'help', 'summary') m.samples = [Sample('s_sum', {}, 7)] self.assertEqual([m], registry.restricted_registry(['s_sum']).collect()) if __name__ == '__main__': unittest.main() python-prometheus-client-0.7.1/tests/test_exposition.py000066400000000000000000000354611350270547000234730ustar00rootroot00000000000000from __future__ import unicode_literals import sys import threading import time import pytest from prometheus_client import ( CollectorRegistry, CONTENT_TYPE_LATEST, Counter, delete_from_gateway, Enum, Gauge, generate_latest, Histogram, Info, instance_ip_grouping_key, Metric, push_to_gateway, pushadd_to_gateway, Summary, ) from prometheus_client import core from prometheus_client.core import GaugeHistogramMetricFamily, Timestamp from prometheus_client.exposition import ( basic_auth_handler, default_handler, MetricsHandler ) if sys.version_info < (2, 7): # We need the skip decorators from unittest2 on Python 2.6. 
import unittest2 as unittest else: import unittest try: from BaseHTTPServer import BaseHTTPRequestHandler from BaseHTTPServer import HTTPServer except ImportError: # Python 3 from http.server import BaseHTTPRequestHandler from http.server import HTTPServer class TestGenerateText(unittest.TestCase): def setUp(self): self.registry = CollectorRegistry() # Mock time so _created values are fixed. self.old_time = time.time time.time = lambda: 123.456 def tearDown(self): time.time = self.old_time def custom_collector(self, metric_family): class CustomCollector(object): def collect(self): return [metric_family] self.registry.register(CustomCollector()) def test_counter(self): c = Counter('cc', 'A counter', registry=self.registry) c.inc() self.assertEqual(b"""# HELP cc_total A counter # TYPE cc_total counter cc_total 1.0 # TYPE cc_created gauge cc_created 123.456 """, generate_latest(self.registry)) def test_counter_total(self): c = Counter('cc_total', 'A counter', registry=self.registry) c.inc() self.assertEqual(b"""# HELP cc_total A counter # TYPE cc_total counter cc_total 1.0 # TYPE cc_created gauge cc_created 123.456 """, generate_latest(self.registry)) def test_gauge(self): g = Gauge('gg', 'A gauge', registry=self.registry) g.set(17) self.assertEqual(b'# HELP gg A gauge\n# TYPE gg gauge\ngg 17.0\n', generate_latest(self.registry)) def test_summary(self): s = Summary('ss', 'A summary', ['a', 'b'], registry=self.registry) s.labels('c', 'd').observe(17) self.assertEqual(b"""# HELP ss A summary # TYPE ss summary ss_count{a="c",b="d"} 1.0 ss_sum{a="c",b="d"} 17.0 # TYPE ss_created gauge ss_created{a="c",b="d"} 123.456 """, generate_latest(self.registry)) @unittest.skipIf(sys.version_info < (2, 7), "Test requires Python 2.7+.") def test_histogram(self): s = Histogram('hh', 'A histogram', registry=self.registry) s.observe(0.05) self.assertEqual(b"""# HELP hh A histogram # TYPE hh histogram hh_bucket{le="0.005"} 0.0 hh_bucket{le="0.01"} 0.0 hh_bucket{le="0.025"} 0.0 hh_bucket{le="0.05"} 1.0 hh_bucket{le="0.075"} 1.0 hh_bucket{le="0.1"} 1.0 hh_bucket{le="0.25"} 1.0 hh_bucket{le="0.5"} 1.0 hh_bucket{le="0.75"} 1.0 hh_bucket{le="1.0"} 1.0 hh_bucket{le="2.5"} 1.0 hh_bucket{le="5.0"} 1.0 hh_bucket{le="7.5"} 1.0 hh_bucket{le="10.0"} 1.0 hh_bucket{le="+Inf"} 1.0 hh_count 1.0 hh_sum 0.05 # TYPE hh_created gauge hh_created 123.456 """, generate_latest(self.registry)) def test_gaugehistogram(self): self.custom_collector(GaugeHistogramMetricFamily('gh', 'help', buckets=[('1.0', 4), ('+Inf', 5)], gsum_value=7)) self.assertEqual(b"""# HELP gh help # TYPE gh histogram gh_bucket{le="1.0"} 4.0 gh_bucket{le="+Inf"} 5.0 # TYPE gh_gcount gauge gh_gcount 5.0 # TYPE gh_gsum gauge gh_gsum 7.0 """, generate_latest(self.registry)) def test_info(self): i = Info('ii', 'A info', ['a', 'b'], registry=self.registry) i.labels('c', 'd').info({'foo': 'bar'}) self.assertEqual(b'# HELP ii_info A info\n# TYPE ii_info gauge\nii_info{a="c",b="d",foo="bar"} 1.0\n', generate_latest(self.registry)) def test_enum(self): i = Enum('ee', 'An enum', ['a', 'b'], registry=self.registry, states=['foo', 'bar']) i.labels('c', 'd').state('bar') self.assertEqual( b'# HELP ee An enum\n# TYPE ee gauge\nee{a="c",b="d",ee="foo"} 0.0\nee{a="c",b="d",ee="bar"} 1.0\n', generate_latest(self.registry)) def test_unicode(self): c = Gauge('cc', '\u4500', ['l'], registry=self.registry) c.labels('\u4500').inc() self.assertEqual(b'# HELP cc \xe4\x94\x80\n# TYPE cc gauge\ncc{l="\xe4\x94\x80"} 1.0\n', generate_latest(self.registry)) def test_escaping(self): g = 
Gauge('cc', 'A\ngaug\\e', ['a'], registry=self.registry) g.labels('\\x\n"').inc(1) self.assertEqual(b'# HELP cc A\\ngaug\\\\e\n# TYPE cc gauge\ncc{a="\\\\x\\n\\""} 1.0\n', generate_latest(self.registry)) def test_nonnumber(self): class MyNumber(object): def __repr__(self): return "MyNumber(123)" def __float__(self): return 123.0 class MyCollector(object): def collect(self): metric = Metric("nonnumber", "Non number", 'untyped') metric.add_sample("nonnumber", {}, MyNumber()) yield metric self.registry.register(MyCollector()) self.assertEqual(b'# HELP nonnumber Non number\n# TYPE nonnumber untyped\nnonnumber 123.0\n', generate_latest(self.registry)) def test_timestamp(self): class MyCollector(object): def collect(self): metric = Metric("ts", "help", 'untyped') metric.add_sample("ts", {"foo": "a"}, 0, 123.456) metric.add_sample("ts", {"foo": "b"}, 0, -123.456) metric.add_sample("ts", {"foo": "c"}, 0, 123) metric.add_sample("ts", {"foo": "d"}, 0, Timestamp(123, 456000000)) metric.add_sample("ts", {"foo": "e"}, 0, Timestamp(123, 456000)) metric.add_sample("ts", {"foo": "f"}, 0, Timestamp(123, 456)) yield metric self.registry.register(MyCollector()) self.assertEqual(b"""# HELP ts help # TYPE ts untyped ts{foo="a"} 0.0 123456 ts{foo="b"} 0.0 -123456 ts{foo="c"} 0.0 123000 ts{foo="d"} 0.0 123456 ts{foo="e"} 0.0 123000 ts{foo="f"} 0.0 123000 """, generate_latest(self.registry)) class TestPushGateway(unittest.TestCase): def setUp(self): self.registry = CollectorRegistry() self.counter = Gauge('g', 'help', registry=self.registry) self.requests = requests = [] class TestHandler(BaseHTTPRequestHandler): def do_PUT(self): if 'with_basic_auth' in self.requestline and self.headers['authorization'] != 'Basic Zm9vOmJhcg==': self.send_response(401) else: self.send_response(201) length = int(self.headers['content-length']) requests.append((self, self.rfile.read(length))) self.end_headers() do_POST = do_PUT do_DELETE = do_PUT httpd = HTTPServer(('localhost', 0), TestHandler) self.address = 'http://localhost:{0}'.format(httpd.server_address[1]) class TestServer(threading.Thread): def run(self): httpd.handle_request() self.server = TestServer() self.server.daemon = True self.server.start() def test_push(self): push_to_gateway(self.address, "my_job", self.registry) self.assertEqual(self.requests[0][0].command, 'PUT') self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job') self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST) self.assertEqual(self.requests[0][1], b'# HELP g help\n# TYPE g gauge\ng 0.0\n') def test_push_with_groupingkey(self): push_to_gateway(self.address, "my_job", self.registry, {'a': 9}) self.assertEqual(self.requests[0][0].command, 'PUT') self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job/a/9') self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST) self.assertEqual(self.requests[0][1], b'# HELP g help\n# TYPE g gauge\ng 0.0\n') def test_push_with_complex_groupingkey(self): push_to_gateway(self.address, "my_job", self.registry, {'a': 9, 'b': 'a/ z'}) self.assertEqual(self.requests[0][0].command, 'PUT') self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job/a/9/b/a%2F+z') self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST) self.assertEqual(self.requests[0][1], b'# HELP g help\n# TYPE g gauge\ng 0.0\n') def test_pushadd(self): pushadd_to_gateway(self.address, "my_job", self.registry) self.assertEqual(self.requests[0][0].command, 'POST') 
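# pushadd maps to POST while plain push maps to PUT: on the Pushgateway side, PUT replaces every metric in the grouping key's group, whereas POST replaces only metrics that share a name with the pushed ones and leaves the rest of the group untouched.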
self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job') self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST) self.assertEqual(self.requests[0][1], b'# HELP g help\n# TYPE g gauge\ng 0.0\n') def test_pushadd_with_groupingkey(self): pushadd_to_gateway(self.address, "my_job", self.registry, {'a': 9}) self.assertEqual(self.requests[0][0].command, 'POST') self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job/a/9') self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST) self.assertEqual(self.requests[0][1], b'# HELP g help\n# TYPE g gauge\ng 0.0\n') def test_delete(self): delete_from_gateway(self.address, "my_job") self.assertEqual(self.requests[0][0].command, 'DELETE') self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job') self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST) self.assertEqual(self.requests[0][1], b'') def test_delete_with_groupingkey(self): delete_from_gateway(self.address, "my_job", {'a': 9}) self.assertEqual(self.requests[0][0].command, 'DELETE') self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job/a/9') self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST) self.assertEqual(self.requests[0][1], b'') def test_push_with_handler(self): def my_test_handler(url, method, timeout, headers, data): headers.append(['X-Test-Header', 'foobar']) # Handler should be passed sane default timeout self.assertEqual(timeout, 30) return default_handler(url, method, timeout, headers, data) push_to_gateway(self.address, "my_job", self.registry, handler=my_test_handler) self.assertEqual(self.requests[0][0].command, 'PUT') self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job') self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST) self.assertEqual(self.requests[0][0].headers.get('x-test-header'), 'foobar') self.assertEqual(self.requests[0][1], b'# HELP g help\n# TYPE g gauge\ng 0.0\n') def test_push_with_basic_auth_handler(self): def my_auth_handler(url, method, timeout, headers, data): return basic_auth_handler(url, method, timeout, headers, data, "foo", "bar") push_to_gateway(self.address, "my_job_with_basic_auth", self.registry, handler=my_auth_handler) self.assertEqual(self.requests[0][0].command, 'PUT') self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job_with_basic_auth') self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST) self.assertEqual(self.requests[0][1], b'# HELP g help\n# TYPE g gauge\ng 0.0\n') @unittest.skipIf( sys.platform == "darwin", "instance_ip_grouping_key() does not work on macOS." 
) def test_instance_ip_grouping_key(self): self.assertTrue('' != instance_ip_grouping_key()['instance']) def test_metrics_handler(self): handler = MetricsHandler.factory(self.registry) self.assertEqual(handler.registry, self.registry) def test_metrics_handler_subclassing(self): subclass = type(str('MetricsHandlerSubclass'), (MetricsHandler, object), {}) handler = subclass.factory(self.registry) self.assertTrue(issubclass(handler, (MetricsHandler, subclass))) @pytest.fixture def registry(): return core.CollectorRegistry() class Collector: def __init__(self, metric_family, *values): self.metric_family = metric_family self.values = values def collect(self): self.metric_family.add_metric([], *self.values) return [self.metric_family] def _expect_metric_exception(registry, expected_error): try: generate_latest(registry) except expected_error as exception: assert isinstance(exception.args[-1], core.Metric) # Got a valid error as expected, return quietly return raise RuntimeError('Expected exception not raised') @pytest.mark.parametrize('MetricFamily', [ core.CounterMetricFamily, core.GaugeMetricFamily, ]) @pytest.mark.parametrize('value,error', [ (None, TypeError), ('', ValueError), ('x', ValueError), ([], TypeError), ({}, TypeError), ]) def test_basic_metric_families(registry, MetricFamily, value, error): metric_family = MetricFamily(MetricFamily.__name__, 'help') registry.register(Collector(metric_family, value)) _expect_metric_exception(registry, error) @pytest.mark.parametrize('count_value,sum_value,error', [ (None, 0, TypeError), (0, None, TypeError), ('', 0, ValueError), (0, '', ValueError), ([], 0, TypeError), (0, [], TypeError), ({}, 0, TypeError), (0, {}, TypeError), ]) def test_summary_metric_family(registry, count_value, sum_value, error): metric_family = core.SummaryMetricFamily('summary', 'help') registry.register(Collector(metric_family, count_value, sum_value)) _expect_metric_exception(registry, error) @pytest.mark.parametrize('MetricFamily', [ core.HistogramMetricFamily, core.GaugeHistogramMetricFamily, ]) @pytest.mark.parametrize('buckets,sum_value,error', [ ([('spam', 0), ('eggs', 0)], None, TypeError), ([('spam', 0), ('eggs', None)], 0, TypeError), ([('spam', 0), (None, 0)], 0, AttributeError), ([('spam', None), ('eggs', 0)], 0, TypeError), ([(None, 0), ('eggs', 0)], 0, AttributeError), ([('spam', 0), ('eggs', 0)], '', ValueError), ([('spam', 0), ('eggs', '')], 0, ValueError), ([('spam', ''), ('eggs', 0)], 0, ValueError), ]) def test_histogram_metric_families(MetricFamily, registry, buckets, sum_value, error): metric_family = MetricFamily(MetricFamily.__name__, 'help') registry.register(Collector(metric_family, buckets, sum_value)) _expect_metric_exception(registry, error) if __name__ == '__main__': unittest.main() python-prometheus-client-0.7.1/tests/test_gc_collector.py000066400000000000000000000037401350270547000237260ustar00rootroot00000000000000from __future__ import unicode_literals import gc import platform import sys if sys.version_info < (2, 7): # We need the skip decorators from unittest2 on Python 2.6. 
import unittest2 as unittest else: import unittest from prometheus_client import CollectorRegistry, GCCollector SKIP = sys.version_info < (3, 4) or platform.python_implementation() != "CPython" @unittest.skipIf(SKIP, "Test requires CPython 3.4 +") class TestGCCollector(unittest.TestCase): def setUp(self): gc.disable() gc.collect() self.registry = CollectorRegistry() def test_working(self): GCCollector(registry=self.registry) self.registry.collect() before = self.registry.get_sample_value('python_gc_objects_collected_total', labels={"generation": "0"}) # add targets for gc a = [] a.append(a) del a b = [] b.append(b) del b gc.collect(0) self.registry.collect() after = self.registry.get_sample_value('python_gc_objects_collected_total', labels={"generation": "0"}) self.assertEqual(2, after - before) self.assertEqual(0, self.registry.get_sample_value( 'python_gc_objects_uncollectable_total', labels={"generation": "0"})) def test_empty(self): GCCollector(registry=self.registry) self.registry.collect() before = self.registry.get_sample_value('python_gc_objects_collected_total', labels={"generation": "0"}) gc.collect(0) self.registry.collect() after = self.registry.get_sample_value('python_gc_objects_collected_total', labels={"generation": "0"}) self.assertEqual(0, after - before) def tearDown(self): gc.enable() python-prometheus-client-0.7.1/tests/test_graphite_bridge.py000066400000000000000000000040441350270547000244040ustar00rootroot00000000000000import threading import unittest from prometheus_client import CollectorRegistry, Gauge from prometheus_client.bridge.graphite import GraphiteBridge try: import SocketServer except ImportError: import socketserver as SocketServer def fake_timer(): return 1434898897.5 class TestGraphiteBridge(unittest.TestCase): def setUp(self): self.registry = CollectorRegistry() self.data = '' class TCPHandler(SocketServer.BaseRequestHandler): def handle(s): self.data = s.request.recv(1024) server = SocketServer.TCPServer(('', 0), TCPHandler) class ServingThread(threading.Thread): def run(self): server.handle_request() server.socket.close() self.t = ServingThread() self.t.start() # Explicitly use localhost as the target host, since connecting to 0.0.0.0 fails on Windows address = ('localhost', server.server_address[1]) self.gb = GraphiteBridge(address, self.registry, _timer=fake_timer) def test_nolabels(self): gauge = Gauge('g', 'help', registry=self.registry) gauge.inc() self.gb.push() self.t.join() self.assertEqual(b'g 1.0 1434898897\n', self.data) def test_labels(self): labels = Gauge('labels', 'help', ['a', 'b'], registry=self.registry) labels.labels('c', 'd').inc() self.gb.push() self.t.join() self.assertEqual(b'labels.a.c.b.d 1.0 1434898897\n', self.data) def test_prefix(self): labels = Gauge('labels', 'help', ['a', 'b'], registry=self.registry) labels.labels('c', 'd').inc() self.gb.push(prefix='pre.fix') self.t.join() self.assertEqual(b'pre.fix.labels.a.c.b.d 1.0 1434898897\n', self.data) def test_sanitizing(self): labels = Gauge('labels', 'help', ['a'], registry=self.registry) labels.labels('c.:8').inc() self.gb.push() self.t.join() self.assertEqual(b'labels.a.c__8 1.0 1434898897\n', self.data) python-prometheus-client-0.7.1/tests/test_multiprocess.py000066400000000000000000000323751350270547000240260ustar00rootroot00000000000000from __future__ import unicode_literals import glob import os import shutil import sys import tempfile from prometheus_client import mmap_dict, values from prometheus_client.core import ( CollectorRegistry, Counter, Gauge, Histogram, 
Sample, Summary, ) from prometheus_client.multiprocess import ( mark_process_dead, MultiProcessCollector, ) from prometheus_client.values import MultiProcessValue, MutexValue if sys.version_info < (2, 7): # We need the skip decorators from unittest2 on Python 2.6. import unittest2 as unittest else: import unittest class TestMultiProcess(unittest.TestCase): def setUp(self): self.tempdir = tempfile.mkdtemp() os.environ['prometheus_multiproc_dir'] = self.tempdir values.ValueClass = MultiProcessValue(lambda: 123) self.registry = CollectorRegistry() self.collector = MultiProcessCollector(self.registry, self.tempdir) @property def _value_class(self): return def tearDown(self): del os.environ['prometheus_multiproc_dir'] shutil.rmtree(self.tempdir) values.ValueClass = MutexValue def test_counter_adds(self): c1 = Counter('c', 'help', registry=None) values.ValueClass = MultiProcessValue(lambda: 456) c2 = Counter('c', 'help', registry=None) self.assertEqual(0, self.registry.get_sample_value('c_total')) c1.inc(1) c2.inc(2) self.assertEqual(3, self.registry.get_sample_value('c_total')) def test_summary_adds(self): s1 = Summary('s', 'help', registry=None) values.ValueClass = MultiProcessValue(lambda: 456) s2 = Summary('s', 'help', registry=None) self.assertEqual(0, self.registry.get_sample_value('s_count')) self.assertEqual(0, self.registry.get_sample_value('s_sum')) s1.observe(1) s2.observe(2) self.assertEqual(2, self.registry.get_sample_value('s_count')) self.assertEqual(3, self.registry.get_sample_value('s_sum')) def test_histogram_adds(self): h1 = Histogram('h', 'help', registry=None) values.ValueClass = MultiProcessValue(lambda: 456) h2 = Histogram('h', 'help', registry=None) self.assertEqual(0, self.registry.get_sample_value('h_count')) self.assertEqual(0, self.registry.get_sample_value('h_sum')) self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '5.0'})) h1.observe(1) h2.observe(2) self.assertEqual(2, self.registry.get_sample_value('h_count')) self.assertEqual(3, self.registry.get_sample_value('h_sum')) self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '5.0'})) def test_gauge_all(self): g1 = Gauge('g', 'help', registry=None) values.ValueClass = MultiProcessValue(lambda: 456) g2 = Gauge('g', 'help', registry=None) self.assertEqual(0, self.registry.get_sample_value('g', {'pid': '123'})) self.assertEqual(0, self.registry.get_sample_value('g', {'pid': '456'})) g1.set(1) g2.set(2) mark_process_dead(123, os.environ['prometheus_multiproc_dir']) self.assertEqual(1, self.registry.get_sample_value('g', {'pid': '123'})) self.assertEqual(2, self.registry.get_sample_value('g', {'pid': '456'})) def test_gauge_liveall(self): g1 = Gauge('g', 'help', registry=None, multiprocess_mode='liveall') values.ValueClass = MultiProcessValue(lambda: 456) g2 = Gauge('g', 'help', registry=None, multiprocess_mode='liveall') self.assertEqual(0, self.registry.get_sample_value('g', {'pid': '123'})) self.assertEqual(0, self.registry.get_sample_value('g', {'pid': '456'})) g1.set(1) g2.set(2) self.assertEqual(1, self.registry.get_sample_value('g', {'pid': '123'})) self.assertEqual(2, self.registry.get_sample_value('g', {'pid': '456'})) mark_process_dead(123, os.environ['prometheus_multiproc_dir']) self.assertEqual(None, self.registry.get_sample_value('g', {'pid': '123'})) self.assertEqual(2, self.registry.get_sample_value('g', {'pid': '456'})) def test_gauge_min(self): g1 = Gauge('g', 'help', registry=None, multiprocess_mode='min') values.ValueClass = MultiProcessValue(lambda: 456) g2 = 
Gauge('g', 'help', registry=None, multiprocess_mode='min') self.assertEqual(0, self.registry.get_sample_value('g')) g1.set(1) g2.set(2) self.assertEqual(1, self.registry.get_sample_value('g')) def test_gauge_max(self): g1 = Gauge('g', 'help', registry=None, multiprocess_mode='max') values.ValueClass = MultiProcessValue(lambda: 456) g2 = Gauge('g', 'help', registry=None, multiprocess_mode='max') self.assertEqual(0, self.registry.get_sample_value('g')) g1.set(1) g2.set(2) self.assertEqual(2, self.registry.get_sample_value('g')) def test_gauge_livesum(self): g1 = Gauge('g', 'help', registry=None, multiprocess_mode='livesum') values.ValueClass = MultiProcessValue(lambda: 456) g2 = Gauge('g', 'help', registry=None, multiprocess_mode='livesum') self.assertEqual(0, self.registry.get_sample_value('g')) g1.set(1) g2.set(2) self.assertEqual(3, self.registry.get_sample_value('g')) mark_process_dead(123, os.environ['prometheus_multiproc_dir']) self.assertEqual(2, self.registry.get_sample_value('g')) def test_namespace_subsystem(self): c1 = Counter('c', 'help', registry=None, namespace='ns', subsystem='ss') c1.inc(1) self.assertEqual(1, self.registry.get_sample_value('ns_ss_c_total')) def test_counter_across_forks(self): pid = 0 values.ValueClass = MultiProcessValue(lambda: pid) c1 = Counter('c', 'help', registry=None) self.assertEqual(0, self.registry.get_sample_value('c_total')) c1.inc(1) c1.inc(1) pid = 1 c1.inc(1) self.assertEqual(3, self.registry.get_sample_value('c_total')) self.assertEqual(1, c1._value.get()) def test_initialization_detects_pid_change(self): pid = 0 values.ValueClass = MultiProcessValue(lambda: pid) # can not inspect the files cache directly, as it's a closure, so we # check for the actual files themselves def files(): fs = os.listdir(os.environ['prometheus_multiproc_dir']) fs.sort() return fs c1 = Counter('c1', 'c1', registry=None) self.assertEqual(files(), ['counter_0.db']) c2 = Counter('c2', 'c2', registry=None) self.assertEqual(files(), ['counter_0.db']) pid = 1 c3 = Counter('c3', 'c3', registry=None) self.assertEqual(files(), ['counter_0.db', 'counter_1.db']) @unittest.skipIf(sys.version_info < (2, 7), "Test requires Python 2.7+.") def test_collect(self): pid = 0 values.ValueClass = MultiProcessValue(lambda: pid) labels = dict((i, i) for i in 'abcd') def add_label(key, value): l = labels.copy() l[key] = value return l c = Counter('c', 'help', labelnames=labels.keys(), registry=None) g = Gauge('g', 'help', labelnames=labels.keys(), registry=None) h = Histogram('h', 'help', labelnames=labels.keys(), registry=None) c.labels(**labels).inc(1) g.labels(**labels).set(1) h.labels(**labels).observe(1) pid = 1 c.labels(**labels).inc(1) g.labels(**labels).set(1) h.labels(**labels).observe(5) metrics = dict((m.name, m) for m in self.collector.collect()) self.assertEqual( metrics['c'].samples, [Sample('c_total', labels, 2.0)] ) metrics['g'].samples.sort(key=lambda x: x[1]['pid']) self.assertEqual(metrics['g'].samples, [ Sample('g', add_label('pid', '0'), 1.0), Sample('g', add_label('pid', '1'), 1.0), ]) metrics['h'].samples.sort( key=lambda x: (x[0], float(x[1].get('le', 0))) ) expected_histogram = [ Sample('h_bucket', add_label('le', '0.005'), 0.0), Sample('h_bucket', add_label('le', '0.01'), 0.0), Sample('h_bucket', add_label('le', '0.025'), 0.0), Sample('h_bucket', add_label('le', '0.05'), 0.0), Sample('h_bucket', add_label('le', '0.075'), 0.0), Sample('h_bucket', add_label('le', '0.1'), 0.0), Sample('h_bucket', add_label('le', '0.25'), 0.0), Sample('h_bucket', add_label('le', 
'0.5'), 0.0), Sample('h_bucket', add_label('le', '0.75'), 0.0), Sample('h_bucket', add_label('le', '1.0'), 1.0), Sample('h_bucket', add_label('le', '2.5'), 1.0), Sample('h_bucket', add_label('le', '5.0'), 2.0), Sample('h_bucket', add_label('le', '7.5'), 2.0), Sample('h_bucket', add_label('le', '10.0'), 2.0), Sample('h_bucket', add_label('le', '+Inf'), 2.0), Sample('h_count', labels, 2.0), Sample('h_sum', labels, 6.0), ] self.assertEqual(metrics['h'].samples, expected_histogram) @unittest.skipIf(sys.version_info < (2, 7), "Test requires Python 2.7+.") def test_merge_no_accumulate(self): pid = 0 values.ValueClass = MultiProcessValue(lambda: pid) labels = dict((i, i) for i in 'abcd') def add_label(key, value): l = labels.copy() l[key] = value return l h = Histogram('h', 'help', labelnames=labels.keys(), registry=None) h.labels(**labels).observe(1) pid = 1 h.labels(**labels).observe(5) path = os.path.join(os.environ['prometheus_multiproc_dir'], '*.db') files = glob.glob(path) metrics = dict( (m.name, m) for m in self.collector.merge(files, accumulate=False) ) metrics['h'].samples.sort( key=lambda x: (x[0], float(x[1].get('le', 0))) ) expected_histogram = [ Sample('h_bucket', add_label('le', '0.005'), 0.0), Sample('h_bucket', add_label('le', '0.01'), 0.0), Sample('h_bucket', add_label('le', '0.025'), 0.0), Sample('h_bucket', add_label('le', '0.05'), 0.0), Sample('h_bucket', add_label('le', '0.075'), 0.0), Sample('h_bucket', add_label('le', '0.1'), 0.0), Sample('h_bucket', add_label('le', '0.25'), 0.0), Sample('h_bucket', add_label('le', '0.5'), 0.0), Sample('h_bucket', add_label('le', '0.75'), 0.0), Sample('h_bucket', add_label('le', '1.0'), 1.0), Sample('h_bucket', add_label('le', '2.5'), 0.0), Sample('h_bucket', add_label('le', '5.0'), 1.0), Sample('h_bucket', add_label('le', '7.5'), 0.0), Sample('h_bucket', add_label('le', '10.0'), 0.0), Sample('h_bucket', add_label('le', '+Inf'), 0.0), Sample('h_sum', labels, 6.0), ] self.assertEqual(metrics['h'].samples, expected_histogram) def test_missing_gauge_file_during_merge(self): # These files don't exist, just like if mark_process_dead(9999999) had been # called during self.collector.collect(), after the glob found it # but before the merge actually happened. 
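# (mark_process_dead() deletes the liveall/livesum gauge files of an exited pid, so losing files in the window between globbing and reading is a routine race in multiprocess mode.)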
# This should not raise and return no metrics self.assertFalse(self.collector.merge([ os.path.join(self.tempdir, 'gauge_liveall_9999999.db'), os.path.join(self.tempdir, 'gauge_livesum_9999999.db'), ])) class TestMmapedDict(unittest.TestCase): def setUp(self): fd, self.tempfile = tempfile.mkstemp() os.close(fd) self.d = mmap_dict.MmapedDict(self.tempfile) def test_process_restart(self): self.d.write_value('abc', 123.0) self.d.close() self.d = mmap_dict.MmapedDict(self.tempfile) self.assertEqual(123, self.d.read_value('abc')) self.assertEqual([('abc', 123.0)], list(self.d.read_all_values())) def test_expansion(self): key = 'a' * mmap_dict._INITIAL_MMAP_SIZE self.d.write_value(key, 123.0) self.assertEqual([(key, 123.0)], list(self.d.read_all_values())) def test_multi_expansion(self): key = 'a' * mmap_dict._INITIAL_MMAP_SIZE * 4 self.d.write_value('abc', 42.0) self.d.write_value(key, 123.0) self.d.write_value('def', 17.0) self.assertEqual( [('abc', 42.0), (key, 123.0), ('def', 17.0)], list(self.d.read_all_values())) def test_corruption_detected(self): self.d.write_value('abc', 42.0) # corrupt the written data self.d._m[8:16] = b'somejunk' with self.assertRaises(RuntimeError): list(self.d.read_all_values()) def tearDown(self): os.unlink(self.tempfile) class TestUnsetEnv(unittest.TestCase): def setUp(self): self.registry = CollectorRegistry() fp, self.tmpfl = tempfile.mkstemp() os.close(fp) def test_unset_syncdir_env(self): self.assertRaises( ValueError, MultiProcessCollector, self.registry) def test_file_syncpath(self): registry = CollectorRegistry() self.assertRaises( ValueError, MultiProcessCollector, registry, self.tmpfl) def tearDown(self): os.remove(self.tmpfl) python-prometheus-client-0.7.1/tests/test_parser.py000066400000000000000000000306011350270547000225570ustar00rootroot00000000000000from __future__ import unicode_literals import math import sys from prometheus_client.core import ( CollectorRegistry, CounterMetricFamily, GaugeMetricFamily, HistogramMetricFamily, Metric, Sample, SummaryMetricFamily, ) from prometheus_client.exposition import generate_latest from prometheus_client.parser import text_string_to_metric_families if sys.version_info < (2, 7): # We need the skip decorators from unittest2 on Python 2.6. import unittest2 as unittest else: import unittest class TestParse(unittest.TestCase): def assertEqualMetrics(self, first, second, msg=None): super(TestParse, self).assertEqual(first, second, msg) # Test that samples are actually named tuples of type Sample. for a, b in zip(first, second): for sa, sb in zip(a.samples, b.samples): assert sa.name == sb.name def test_simple_counter(self): families = text_string_to_metric_families("""# TYPE a counter # HELP a help a 1 """) self.assertEqualMetrics([CounterMetricFamily("a", "help", value=1)], list(families)) def test_simple_gauge(self): families = text_string_to_metric_families("""# TYPE a gauge # HELP a help a 1 """) self.assertEqualMetrics([GaugeMetricFamily("a", "help", value=1)], list(families)) def test_simple_summary(self): families = text_string_to_metric_families("""# TYPE a summary # HELP a help a_count 1 a_sum 2 """) summary = SummaryMetricFamily("a", "help", count_value=1, sum_value=2) self.assertEqualMetrics([summary], list(families)) def test_summary_quantiles(self): families = text_string_to_metric_families("""# TYPE a summary # HELP a help a_count 1 a_sum 2 a{quantile="0.5"} 0.7 """) # The Python client doesn't support quantiles, but we # still need to be able to parse them. 
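# SummaryMetricFamily's constructor only covers the count and sum, so the expected quantile sample is attached separately via add_sample() below.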
python-prometheus-client-0.7.1/tests/test_parser.py000066400000000000000000000306011350270547000225570ustar00rootroot00000000000000from __future__ import unicode_literals

import math
import sys

from prometheus_client.core import (
    CollectorRegistry, CounterMetricFamily, GaugeMetricFamily,
    HistogramMetricFamily, Metric, Sample, SummaryMetricFamily,
)
from prometheus_client.exposition import generate_latest
from prometheus_client.parser import text_string_to_metric_families

if sys.version_info < (2, 7):
    # We need the skip decorators from unittest2 on Python 2.6.
    import unittest2 as unittest
else:
    import unittest


class TestParse(unittest.TestCase):
    def assertEqualMetrics(self, first, second, msg=None):
        super(TestParse, self).assertEqual(first, second, msg)
        # Test that samples are actually named tuples of type Sample.
        for a, b in zip(first, second):
            for sa, sb in zip(a.samples, b.samples):
                assert sa.name == sb.name

    def test_simple_counter(self):
        families = text_string_to_metric_families("""# TYPE a counter
# HELP a help
a 1
""")
        self.assertEqualMetrics([CounterMetricFamily("a", "help", value=1)], list(families))

    def test_simple_gauge(self):
        families = text_string_to_metric_families("""# TYPE a gauge
# HELP a help
a 1
""")
        self.assertEqualMetrics([GaugeMetricFamily("a", "help", value=1)], list(families))

    def test_simple_summary(self):
        families = text_string_to_metric_families("""# TYPE a summary
# HELP a help
a_count 1
a_sum 2
""")
        summary = SummaryMetricFamily("a", "help", count_value=1, sum_value=2)
        self.assertEqualMetrics([summary], list(families))

    def test_summary_quantiles(self):
        families = text_string_to_metric_families("""# TYPE a summary
# HELP a help
a_count 1
a_sum 2
a{quantile="0.5"} 0.7
""")
        # The Python client doesn't support quantiles, but we
        # still need to be able to parse them.
        metric_family = SummaryMetricFamily("a", "help", count_value=1, sum_value=2)
        metric_family.add_sample("a", {"quantile": "0.5"}, 0.7)
        self.assertEqualMetrics([metric_family], list(families))
    def test_simple_histogram(self):
        families = text_string_to_metric_families("""# TYPE a histogram
# HELP a help
a_bucket{le="1"} 0
a_bucket{le="+Inf"} 3
a_count 3
a_sum 2
""")
        self.assertEqualMetrics([HistogramMetricFamily("a", "help", sum_value=2, buckets=[("1", 0.0), ("+Inf", 3.0)])], list(families))

    def test_no_metadata(self):
        families = text_string_to_metric_families("""a 1
""")
        metric_family = Metric("a", "", "untyped")
        metric_family.add_sample("a", {}, 1)
        self.assertEqualMetrics([metric_family], list(families))

    def test_untyped(self):
        # https://github.com/prometheus/client_python/issues/79
        families = text_string_to_metric_families("""# HELP redis_connected_clients Redis connected clients
# TYPE redis_connected_clients untyped
redis_connected_clients{instance="rough-snowflake-web",port="6380"} 10.0
redis_connected_clients{instance="rough-snowflake-web",port="6381"} 12.0
""")
        m = Metric("redis_connected_clients", "Redis connected clients", "untyped")
        m.samples = [
            Sample("redis_connected_clients", {"instance": "rough-snowflake-web", "port": "6380"}, 10),
            Sample("redis_connected_clients", {"instance": "rough-snowflake-web", "port": "6381"}, 12),
        ]
        self.assertEqualMetrics([m], list(families))

    def test_type_help_switched(self):
        families = text_string_to_metric_families("""# HELP a help
# TYPE a counter
a 1
""")
        self.assertEqualMetrics([CounterMetricFamily("a", "help", value=1)], list(families))

    def test_blank_lines_and_comments(self):
        families = text_string_to_metric_families("""
# TYPE a counter
# FOO a
# BAR b
# HELP a help
a 1
""")
        self.assertEqualMetrics([CounterMetricFamily("a", "help", value=1)], list(families))

    def test_tabs(self):
        families = text_string_to_metric_families("""#\tTYPE\ta\tcounter
#\tHELP\ta\thelp
a\t1
""")
        self.assertEqualMetrics([CounterMetricFamily("a", "help", value=1)], list(families))

    def test_labels_with_curly_braces(self):
        families = text_string_to_metric_families("""# TYPE a counter
# HELP a help
a{foo="bar", bar="b{a}z"} 1
""")
        metric_family = CounterMetricFamily("a", "help", labels=["foo", "bar"])
        metric_family.add_metric(["bar", "b{a}z"], 1)
        self.assertEqualMetrics([metric_family], list(families))

    def test_empty_help(self):
        families = text_string_to_metric_families("""# TYPE a counter
# HELP a
a 1
""")
        self.assertEqualMetrics([CounterMetricFamily("a", "", value=1)], list(families))

    def test_labels_and_infinite(self):
        families = text_string_to_metric_families("""# TYPE a counter
# HELP a help
a{foo="bar"} +Inf
a{foo="baz"} -Inf
""")
        metric_family = CounterMetricFamily("a", "help", labels=["foo"])
        metric_family.add_metric(["bar"], float('inf'))
        metric_family.add_metric(["baz"], float('-inf'))
        self.assertEqualMetrics([metric_family], list(families))

    def test_spaces(self):
        families = text_string_to_metric_families("""# TYPE a counter
# HELP a help
a{ foo = "bar" } 1
a\t\t{\t\tfoo\t\t=\t\t"baz"\t\t}\t\t2
a { foo = "buz" } 3
a\t { \t foo\t = "biz"\t } \t 4
a \t{\t foo = "boz"\t}\t 5
""")
        metric_family = CounterMetricFamily("a", "help", labels=["foo"])
        metric_family.add_metric(["bar"], 1)
        metric_family.add_metric(["baz"], 2)
        metric_family.add_metric(["buz"], 3)
        metric_family.add_metric(["biz"], 4)
        metric_family.add_metric(["boz"], 5)
        self.assertEqualMetrics([metric_family], list(families))

    def test_commas(self):
        families = text_string_to_metric_families("""# TYPE a counter
# HELP a help
a{foo="bar",} 1
a{foo="baz", } 1
# TYPE b counter
# HELP b help
b{,} 2
# TYPE c counter
# HELP c help
c{ ,} 3
# TYPE d counter
# HELP d help
d{, } 4
""")
        a = CounterMetricFamily("a", "help", labels=["foo"])
        a.add_metric(["bar"], 1)
        a.add_metric(["baz"], 1)
        b = CounterMetricFamily("b", "help", value=2)
        c = CounterMetricFamily("c", "help", value=3)
        d = CounterMetricFamily("d", "help", value=4)
        self.assertEqualMetrics([a, b, c, d], list(families))

    def test_multiple_trailing_commas(self):
        text = """# TYPE a counter
# HELP a help
a{foo="bar",, } 1
"""
        self.assertRaises(ValueError,
                          lambda: list(text_string_to_metric_families(text)))

    def test_empty_brackets(self):
        families = text_string_to_metric_families("""# TYPE a counter
# HELP a help
a{} 1
""")
        self.assertEqualMetrics([CounterMetricFamily("a", "help", value=1)], list(families))

    def test_nan(self):
        families = text_string_to_metric_families("""a NaN
""")
        # Can't use a simple comparison as nan != nan.
        self.assertTrue(math.isnan(list(families)[0].samples[0][2]))

    def test_empty_label(self):
        families = text_string_to_metric_families("""# TYPE a counter
# HELP a help
a{foo="bar"} 1
a{foo=""} 2
""")
        metric_family = CounterMetricFamily("a", "help", labels=["foo"])
        metric_family.add_metric(["bar"], 1)
        metric_family.add_metric([""], 2)
        self.assertEqualMetrics([metric_family], list(families))

    def test_label_escaping(self):
        for escaped_val, unescaped_val in [
                ('foo', 'foo'),
                ('\\foo', '\\foo'),
                ('\\\\foo', '\\foo'),
                ('foo\\\\', 'foo\\'),
                ('\\\\', '\\'),
                ('\\n', '\n'),
                ('\\\\n', '\\n'),
                ('\\\\\\n', '\\\n'),
                ('\\"', '"'),
                ('\\\\\\"', '\\"')]:
            families = list(text_string_to_metric_families("""# TYPE a counter
# HELP a help
a{foo="%s",bar="baz"} 1
""" % escaped_val))
            metric_family = CounterMetricFamily(
                "a", "help", labels=["foo", "bar"])
            metric_family.add_metric([unescaped_val, "baz"], 1)
            self.assertEqualMetrics([metric_family], list(families))

    def test_help_escaping(self):
        for escaped_val, unescaped_val in [
                ('foo', 'foo'),
                ('\\foo', '\\foo'),
                ('\\\\foo', '\\foo'),
                ('foo\\', 'foo\\'),
                ('foo\\\\', 'foo\\'),
                ('\\n', '\n'),
                ('\\\\n', '\\n'),
                ('\\\\\\n', '\\\n'),
                ('\\"', '\\"'),
                ('\\\\"', '\\"'),
                ('\\\\\\"', '\\\\"')]:
            families = list(text_string_to_metric_families("""# TYPE a counter
# HELP a %s
a{foo="bar"} 1
""" % escaped_val))
            metric_family = CounterMetricFamily("a", unescaped_val, labels=["foo"])
            metric_family.add_metric(["bar"], 1)
            self.assertEqualMetrics([metric_family], list(families))

    def test_escaping(self):
        families = text_string_to_metric_families("""# TYPE a counter
# HELP a he\\n\\\\l\\tp
a{foo="b\\"a\\nr"} 1
a{foo="b\\\\a\\z"} 2
""")
        metric_family = CounterMetricFamily("a", "he\n\\l\\tp", labels=["foo"])
        metric_family.add_metric(["b\"a\nr"], 1)
        metric_family.add_metric(["b\\a\\z"], 2)
        self.assertEqualMetrics([metric_family], list(families))

    def test_timestamps_discarded(self):
        families = text_string_to_metric_families("""# TYPE a counter
# HELP a help
a{foo="bar"} 1\t000
# TYPE b counter
# HELP b help
b 2 1234567890
""")
        a = CounterMetricFamily("a", "help", labels=["foo"])
        a.add_metric(["bar"], 1)
        b = CounterMetricFamily("b", "help", value=2)
        self.assertEqualMetrics([a, b], list(families))
    @unittest.skipIf(sys.version_info < (2, 7), "Test requires Python 2.7+.")
    def test_roundtrip(self):
        text = """# HELP go_gc_duration_seconds A summary of the GC invocation durations.
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 0.013300656000000001
go_gc_duration_seconds{quantile="0.25"} 0.013638736
go_gc_duration_seconds{quantile="0.5"} 0.013759906
go_gc_duration_seconds{quantile="0.75"} 0.013962066
go_gc_duration_seconds{quantile="1"} 0.021383540000000003
go_gc_duration_seconds_sum 56.12904785
go_gc_duration_seconds_count 7476.0
# HELP go_goroutines Number of goroutines that currently exist.
# TYPE go_goroutines gauge
go_goroutines 166.0
# HELP prometheus_local_storage_indexing_batch_duration_milliseconds Quantiles for batch indexing duration in milliseconds.
# TYPE prometheus_local_storage_indexing_batch_duration_milliseconds summary
prometheus_local_storage_indexing_batch_duration_milliseconds{quantile="0.5"} NaN
prometheus_local_storage_indexing_batch_duration_milliseconds{quantile="0.9"} NaN
prometheus_local_storage_indexing_batch_duration_milliseconds{quantile="0.99"} NaN
prometheus_local_storage_indexing_batch_duration_milliseconds_sum 871.5665949999999
prometheus_local_storage_indexing_batch_duration_milliseconds_count 229.0
# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
# TYPE process_cpu_seconds_total counter
process_cpu_seconds_total 29323.4
# HELP process_virtual_memory_bytes Virtual memory size in bytes.
# TYPE process_virtual_memory_bytes gauge
process_virtual_memory_bytes 2.478268416e+09
# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, and branch from which Prometheus was built.
# TYPE prometheus_build_info gauge
prometheus_build_info{branch="HEAD",revision="ef176e5",version="0.16.0rc1"} 1.0
# HELP prometheus_local_storage_chunk_ops_total The total number of chunk operations by their type.
# TYPE prometheus_local_storage_chunk_ops_total counter
prometheus_local_storage_chunk_ops_total{type="clone"} 28.0
prometheus_local_storage_chunk_ops_total{type="create"} 997844.0
prometheus_local_storage_chunk_ops_total{type="drop"} 1.345758e+06
prometheus_local_storage_chunk_ops_total{type="load"} 1641.0
prometheus_local_storage_chunk_ops_total{type="persist"} 981408.0
prometheus_local_storage_chunk_ops_total{type="pin"} 32662.0
prometheus_local_storage_chunk_ops_total{type="transcode"} 980180.0
prometheus_local_storage_chunk_ops_total{type="unpin"} 32662.0
"""
        families = list(text_string_to_metric_families(text))

        class TextCollector(object):
            def collect(self):
                return families

        registry = CollectorRegistry()
        registry.register(TextCollector())
        self.assertEqual(text.encode('utf-8'), generate_latest(registry))


if __name__ == '__main__':
    unittest.main()
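
# A minimal sketch of the parser API exercised above, assuming the 0.7.x
# Sample namedtuple fields name/labels/value (illustrative helper, not part
# of the test suite): walk every parsed family and sample in a text payload.
def _example_walk_families(text):
    from prometheus_client.parser import text_string_to_metric_families
    out = []
    for family in text_string_to_metric_families(text):
        for sample in family.samples:
            out.append((sample.name, sample.labels, sample.value))
    return out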
"pvt_major", "minor": "pvt_minor", "patchlevel": "pvt_patchlevel", "jvm_version": "jv_release", "jvm_release": "vm_release", "jvm_vendor": "vm_vendor", "jvm_name": "vm_name" }) def assertLabels(self, name, labels): for metric in self.registry.collect(): for s in metric.samples: if s.name == name: assert s.labels == labels return assert False class _MockPlatform(object): def __init__(self): self._system = "system" def python_version_tuple(self): return "pvt_major", "pvt_minor", "pvt_patchlevel" def python_version(self): return "python_version" def python_implementation(self): return "python_implementation" def system(self): return self._system def java_ver(self): return ( "jv_release", "jv_vendor", ("vm_name", "vm_release", "vm_vendor"), ("os_name", "os_version", "os_arch") ) python-prometheus-client-0.7.1/tests/test_process_collector.py000066400000000000000000000067601350270547000250200ustar00rootroot00000000000000from __future__ import unicode_literals import os import unittest from prometheus_client import CollectorRegistry, ProcessCollector class TestProcessCollector(unittest.TestCase): def setUp(self): self.registry = CollectorRegistry() self.test_proc = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'proc') def test_working(self): collector = ProcessCollector(proc=self.test_proc, pid=lambda: 26231, registry=self.registry) collector._ticks = 100 self.assertEqual(17.21, self.registry.get_sample_value('process_cpu_seconds_total')) self.assertEqual(56274944.0, self.registry.get_sample_value('process_virtual_memory_bytes')) self.assertEqual(8114176, self.registry.get_sample_value('process_resident_memory_bytes')) self.assertEqual(1418184099.75, self.registry.get_sample_value('process_start_time_seconds')) self.assertEqual(2048.0, self.registry.get_sample_value('process_max_fds')) self.assertEqual(5.0, self.registry.get_sample_value('process_open_fds')) self.assertEqual(None, self.registry.get_sample_value('process_fake_namespace')) def test_namespace(self): collector = ProcessCollector(proc=self.test_proc, pid=lambda: 26231, registry=self.registry, namespace='n') collector._ticks = 100 self.assertEqual(17.21, self.registry.get_sample_value('n_process_cpu_seconds_total')) self.assertEqual(56274944.0, self.registry.get_sample_value('n_process_virtual_memory_bytes')) self.assertEqual(8114176, self.registry.get_sample_value('n_process_resident_memory_bytes')) self.assertEqual(1418184099.75, self.registry.get_sample_value('n_process_start_time_seconds')) self.assertEqual(2048.0, self.registry.get_sample_value('n_process_max_fds')) self.assertEqual(5.0, self.registry.get_sample_value('n_process_open_fds')) self.assertEqual(None, self.registry.get_sample_value('process_cpu_seconds_total')) def test_working_584(self): collector = ProcessCollector(proc=self.test_proc, pid=lambda: "584\n", registry=self.registry) collector._ticks = 100 self.assertEqual(0.0, self.registry.get_sample_value('process_cpu_seconds_total')) self.assertEqual(10395648.0, self.registry.get_sample_value('process_virtual_memory_bytes')) self.assertEqual(634880, self.registry.get_sample_value('process_resident_memory_bytes')) self.assertEqual(1418291667.75, self.registry.get_sample_value('process_start_time_seconds')) self.assertEqual(None, self.registry.get_sample_value('process_max_fds')) self.assertEqual(None, self.registry.get_sample_value('process_open_fds')) def test_working_fake_pid(self): collector = ProcessCollector(proc=self.test_proc, pid=lambda: 123, registry=self.registry) collector._ticks = 100 
python-prometheus-client-0.7.1/tests/test_process_collector.py000066400000000000000000000067601350270547000250140ustar00rootroot00000000000000from __future__ import unicode_literals

import os
import unittest

from prometheus_client import CollectorRegistry, ProcessCollector


class TestProcessCollector(unittest.TestCase):
    def setUp(self):
        self.registry = CollectorRegistry()
        self.test_proc = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'proc')

    def test_working(self):
        collector = ProcessCollector(proc=self.test_proc, pid=lambda: 26231, registry=self.registry)
        collector._ticks = 100

        self.assertEqual(17.21, self.registry.get_sample_value('process_cpu_seconds_total'))
        self.assertEqual(56274944.0, self.registry.get_sample_value('process_virtual_memory_bytes'))
        self.assertEqual(8114176, self.registry.get_sample_value('process_resident_memory_bytes'))
        self.assertEqual(1418184099.75, self.registry.get_sample_value('process_start_time_seconds'))
        self.assertEqual(2048.0, self.registry.get_sample_value('process_max_fds'))
        self.assertEqual(5.0, self.registry.get_sample_value('process_open_fds'))
        self.assertEqual(None, self.registry.get_sample_value('process_fake_namespace'))

    def test_namespace(self):
        collector = ProcessCollector(proc=self.test_proc, pid=lambda: 26231, registry=self.registry, namespace='n')
        collector._ticks = 100

        self.assertEqual(17.21, self.registry.get_sample_value('n_process_cpu_seconds_total'))
        self.assertEqual(56274944.0, self.registry.get_sample_value('n_process_virtual_memory_bytes'))
        self.assertEqual(8114176, self.registry.get_sample_value('n_process_resident_memory_bytes'))
        self.assertEqual(1418184099.75, self.registry.get_sample_value('n_process_start_time_seconds'))
        self.assertEqual(2048.0, self.registry.get_sample_value('n_process_max_fds'))
        self.assertEqual(5.0, self.registry.get_sample_value('n_process_open_fds'))
        self.assertEqual(None, self.registry.get_sample_value('process_cpu_seconds_total'))

    def test_working_584(self):
        collector = ProcessCollector(proc=self.test_proc, pid=lambda: "584\n", registry=self.registry)
        collector._ticks = 100

        self.assertEqual(0.0, self.registry.get_sample_value('process_cpu_seconds_total'))
        self.assertEqual(10395648.0, self.registry.get_sample_value('process_virtual_memory_bytes'))
        self.assertEqual(634880, self.registry.get_sample_value('process_resident_memory_bytes'))
        self.assertEqual(1418291667.75, self.registry.get_sample_value('process_start_time_seconds'))
        self.assertEqual(None, self.registry.get_sample_value('process_max_fds'))
        self.assertEqual(None, self.registry.get_sample_value('process_open_fds'))

    def test_working_fake_pid(self):
        collector = ProcessCollector(proc=self.test_proc, pid=lambda: 123, registry=self.registry)
        collector._ticks = 100

        self.assertEqual(None, self.registry.get_sample_value('process_cpu_seconds_total'))
        self.assertEqual(None, self.registry.get_sample_value('process_virtual_memory_bytes'))
        self.assertEqual(None, self.registry.get_sample_value('process_resident_memory_bytes'))
        self.assertEqual(None, self.registry.get_sample_value('process_start_time_seconds'))
        self.assertEqual(None, self.registry.get_sample_value('process_max_fds'))
        self.assertEqual(None, self.registry.get_sample_value('process_open_fds'))
        self.assertEqual(None, self.registry.get_sample_value('process_fake_namespace'))


if __name__ == '__main__':
    unittest.main()
python-prometheus-client-0.7.1/tests/test_twisted.py000066400000000000000000000030511350270547000227450ustar00rootroot00000000000000from __future__ import absolute_import, unicode_literals

import sys

from prometheus_client import CollectorRegistry, Counter, generate_latest

if sys.version_info < (2, 7):
    from unittest2 import skipUnless
else:
    from unittest import skipUnless

try:
    from prometheus_client.twisted import MetricsResource

    from twisted.trial.unittest import TestCase
    from twisted.web.server import Site
    from twisted.web.resource import Resource
    from twisted.internet import reactor
    from twisted.web.client import Agent
    from twisted.web.client import readBody

    HAVE_TWISTED = True
except ImportError:
    from unittest import TestCase

    HAVE_TWISTED = False


class MetricsResourceTest(TestCase):
    @skipUnless(HAVE_TWISTED, "Don't have twisted installed.")
    def setUp(self):
        self.registry = CollectorRegistry()

    def test_reports_metrics(self):
        """
        ``MetricsResource`` serves the metrics from the provided registry.
        """
        c = Counter('cc', 'A counter', registry=self.registry)
        c.inc()

        root = Resource()
        root.putChild(b'metrics', MetricsResource(registry=self.registry))
        server = reactor.listenTCP(0, Site(root))
        self.addCleanup(server.stopListening)

        agent = Agent(reactor)
        port = server.getHost().port
        url = "http://localhost:{port}/metrics".format(port=port)
        d = agent.request(b"GET", url.encode("ascii"))

        d.addCallback(readBody)
        d.addCallback(self.assertEqual, generate_latest(self.registry))

        return d
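
# A minimal sketch of serving metrics with the resource under test, assuming
# twisted.web is installed and port 8000 is free; MetricsResource() defaults
# to the global REGISTRY (illustrative helper, not part of the test suite):
def _example_serve_metrics():
    from twisted.internet import reactor
    from twisted.web.resource import Resource
    from twisted.web.server import Site

    from prometheus_client.twisted import MetricsResource

    root = Resource()
    root.putChild(b'metrics', MetricsResource())
    reactor.listenTCP(8000, Site(root))
    reactor.run()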
python-prometheus-client-0.7.1/tox.ini000066400000000000000000000027721350270547000200310ustar00rootroot00000000000000[tox]
envlist = coverage-clean,py26,py27,py34,py35,py36,py37,pypy,pypy3,{py27,py37}-nooptionals,coverage-report,flake8

[base]
deps =
    coverage
    pytest

[testenv:py26]
; Last pytest and py versions supported on py26.
deps =
    unittest2
    py==1.4.31
    pytest==2.9.2
    coverage
    futures

[testenv:py27]
deps =
    {[base]deps}
    futures

[testenv:pypy]
deps =
    {[base]deps}
    futures

[testenv]
deps =
    {[base]deps}
    {py27,py37,pypy,pypy3}: twisted
commands = coverage run --parallel -m pytest {posargs}

; Ensure the test suite passes if no optional dependencies are present.
[testenv:py27-nooptionals]
deps =
    {[base]deps}
    futures
commands = coverage run --parallel -m pytest {posargs}

[testenv:py37-nooptionals]
commands = coverage run --parallel -m pytest {posargs}

[testenv:coverage-clean]
deps = coverage
skip_install = true
commands = coverage erase

[testenv:coverage-report]
deps = coverage
skip_install = true
commands =
    coverage combine
    coverage report

[testenv:flake8]
deps =
    flake8==3.5.0
    flake8-docstrings==1.3.0
    flake8-import-order==0.16
skip_install = true
commands = flake8 prometheus_client/ tests/ setup.py

[flake8]
ignore = D, E303, E402, E501, E722, E741, F821, F841, W291, W293, W503, E129
import-order-style = google
application-import-names = prometheus_client

[isort]
force_alphabetical_sort_within_sections = True
force_sort_within_sections = True
include_trailing_comma = True
multi_line_output = 5