pax_global_header00006660000000000000000000000064133372701160014515gustar00rootroot0000000000000052 comment=100046bcd349013d6d78922585621159a33d1ebb pystatsd-3.3/000077500000000000000000000000001333727011600132355ustar00rootroot00000000000000pystatsd-3.3/.gitignore000066400000000000000000000000751333727011600152270ustar00rootroot00000000000000*.pyc *.egg-info build dist .tox .coverage *.swp docs/_build pystatsd-3.3/.travis.yml000066400000000000000000000003051333727011600153440ustar00rootroot00000000000000language: python sudo: false python: - "2.7" - "3.4" - "3.5" - "3.6" - "pypy" install: - pip install -q "flake8" script: - nosetests --with-coverage --cover-package=statsd - flake8 statsd/ pystatsd-3.3/AUTHORS000066400000000000000000000003651333727011600143110ustar00rootroot00000000000000Lead Developer: - James Socol Contributors: - Jeff Balogh - Andy McKay - Daniel Holz - Kyle Conroy - Mathieu Leplatre pystatsd-3.3/CHANGES000066400000000000000000000050771333727011600142410ustar00rootroot00000000000000Statsd Changelog ================ Version 3.3 ----------- - Drop support for Python 2.5, 2.6, 3.2, 3.3 (#108, #116). - Add UnixSocketStatsClient (#76, #112). - Add support for timedeltas in timing() (#104, #111). - Fix timer decorator with partial functions (#85). - Remove ABCMeta metaclass (incompatible with Py3) (#109). - Refactor client module (#115). - Various doc updates (#99, #102, #110, #113, #114). Version 3.2.2 ------------- - Use a monotomic timer to avoid clock adjustments (#96). - Test on Python 3.5 and 3.6. - Various doc updates. Version 3.2.1 ------------- - Restore `StatsClient(host, port, prefix)` argument order. Version 3.2 ----------- - Add an explicit IPv6 flag. - Add support for sub-millisecond timings Version 3.1 ----------- - Add IPv6 support. - Add TCPStatsClient/TCPPipeline to support connection-mode clients. Version 3.0.1 ------------- - Make timers-as-decorators threadsafe. 
Version 3.0 ----------- - Moved default client instances out of __init__.py. Now find them in the `statsd.defaults.{django,env}` modules. Version 2.1.2 ------------- - Fix negative absolute (non-delta) gauges. - Improve test coverage. Version 2.1.1 ------------- - Fix issue with timers used as decorators. Version 2.1 ----------- - Add maxudpsize option for Pipelines. - Add methods to use Timer objects directly. Version 2.0.3 ------------- - Handle large numbers in gauges correctly. - Add `set` type. - Pipelines use parent client's _after method. Version 2.0.2 ------------- - Don't try to pop stats off an empty pipeline. - Fix installs with Django 1.5 on the PYTHONPATH. Version 2.0.1 ------------- - Fix install with Django 1.5 in the environment. Version 2.0 ----------- - Add Pipeline subclass for batching. - Added an _after method subclasses can use to change behavior. - Add support for gauge deltas. Version 1.0 ----------- - Clean up tests and requirements. - Encode socket data in ASCII. - Tag v1. Version 0.5.1 ------------- - Stop supporting IPv6. StatsD doesn't support it, and it breaks things. - incr, decr, and gauge now support floating point values. Version 0.5.0 ------------- - Add support for gauges. - Add real docs and hook up ReadTheDocs. - Add support for environment var configuration. Version 0.4.0 ------------- - Look up IP addresses once per client instance. - Support IPv6. Version 0.3.0 ------------- - Improve StatsClient.timer. - Remove nasty threadlocal stuff. - Return result of StatsClient.timer. Version 0.2.0 ------------- - Optional prefix for all stats. - Introduce StatsClient.timer context decorator. 
pystatsd-3.3/LICENSE000066400000000000000000000020401333727011600142360ustar00rootroot00000000000000Copyright (c) 2012, James Socol Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. pystatsd-3.3/MANIFEST.in000066400000000000000000000001411333727011600147670ustar00rootroot00000000000000include AUTHORS CHANGES LICENSE MANIFEST.in README.rst include setup.py recursive-include docs * pystatsd-3.3/README.rst000066400000000000000000000036231333727011600147300ustar00rootroot00000000000000====================== A Python statsd client ====================== statsd_ is a friendly front-end to Graphite_. This is a Python client for the statsd daemon. .. image:: https://travis-ci.org/jsocol/pystatsd.png?branch=master :target: https://travis-ci.org/jsocol/pystatsd :alt: Travis-CI build status .. image:: https://img.shields.io/pypi/v/statsd.svg :target: https://pypi.python.org/pypi/statsd/ :alt: Latest release .. 
image:: https://img.shields.io/pypi/pyversions/statsd.svg :target: https://pypi.python.org/pypi/statsd/ :alt: Supported Python versions .. image:: https://img.shields.io/pypi/wheel/statsd.svg :target: https://pypi.python.org/pypi/statsd/ :alt: Wheel Status :Code: https://github.com/jsocol/pystatsd :License: MIT; see LICENSE file :Issues: https://github.com/jsocol/pystatsd/issues :Documentation: https://statsd.readthedocs.io/ Quickly, to use: .. code-block:: python >>> import statsd >>> c = statsd.StatsClient('localhost', 8125) >>> c.incr('foo') # Increment the 'foo' counter. >>> c.timing('stats.timed', 320) # Record a 320ms 'stats.timed'. You can also add a prefix to all your stats: .. code-block:: python >>> import statsd >>> c = statsd.StatsClient('localhost', 8125, prefix='foo') >>> c.incr('bar') # Will be 'foo.bar' in statsd/graphite. Installing ========== The easiest way to install statsd is with pip! You can install from PyPI:: $ pip install statsd Or GitHub:: $ pip install -e git+https://github.com/jsocol/pystatsd#egg=statsd Or from source:: $ git clone https://github.com/jsocol/pystatsd $ cd statsd $ python setup.py install Docs ==== There are lots of docs in the ``docs/`` directory and on ReadTheDocs_. .. _statsd: https://github.com/etsy/statsd .. _Graphite: https://graphite.readthedocs.io/ .. _ReadTheDocs: https://statsd.readthedocs.io/en/latest/index.html pystatsd-3.3/docs/000077500000000000000000000000001333727011600141655ustar00rootroot00000000000000pystatsd-3.3/docs/Makefile000066400000000000000000000127241333727011600156330ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
# the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" clean: -rm -rf $(BUILDDIR)/* html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." 
json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/PythonStatsD.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/PythonStatsD.qhc" devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/PythonStatsD" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/PythonStatsD" @echo "# devhelp" epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. 
The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." pystatsd-3.3/docs/conf.py000066400000000000000000000173401333727011600154710ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Python StatsD documentation build configuration file, created by # sphinx-quickstart on Mon Apr 9 15:47:23 2012. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
#sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Python StatsD' copyright = u'2015, James Socol' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '3.3' # The full version, including alpha/beta/rc tags. release = '3.3.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). 
#add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. 
#html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'PythonStatsDdoc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'PythonStatsD.tex', u'Python StatsD Documentation', u'James Socol', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. 
#latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'pythonstatsd', u'Python StatsD Documentation', [u'James Socol'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'PythonStatsD', u'Python StatsD Documentation', u'James Socol', 'PythonStatsD', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' pystatsd-3.3/docs/configure.rst000066400000000000000000000077441333727011600167140ustar00rootroot00000000000000.. _configuring-chapter: ================== Configuring Statsd ================== It's easy to configure and use Statsd at runtime, but there are also two shortcuts available. Runtime ======= If you are running the statsd_ server locally and on the default port, it's extremely easy: .. code-block:: python from statsd import StatsClient statsd = StatsClient() statsd.incr('foo') There are several arguments to configure your :py:class:`StatsClient` instance. They, and their defaults, are: .. code-block:: python from statsd import StatsClient statsd = StatsClient(host='localhost', port=8125, prefix=None, maxudpsize=512, ipv6=False) ``host`` is the host running the statsd server. 
It will support any kind of name or IP address you might use. ``port`` is the statsd server port. The default for both server and client is ``8125``. ``prefix`` helps distinguish multiple applications or environments using the same statsd server. It will be prepended to all stats, automatically. For example: .. code-block:: python from statsd import StatsClient foo_stats = StatsClient(prefix='foo') bar_stats = StatsClient(prefix='bar') foo_stats.incr('baz') bar_stats.incr('baz') will produce two different stats, ``foo.baz`` and ``bar.baz``. Without the ``prefix`` argument, or with the same ``prefix``, two ``StatsClient`` instances will update the same stats. .. versionadded:: 2.0.3 ``maxudpsize`` specifies the maximum packet size statsd will use. This is an advanced options and should not be changed unless you know what you are doing. Larger values then the default of 512 are generally deemed unsafe for use on the internet. On a controlled local network or when the statsd server is running on 127.0.0.1 larger values can decrease the number of UDP packets when pipelining many metrics. Use with care! .. versionadded:: 3.2 ``ipv6`` tells the client explicitly to look up the host using IPv6 (``True``) or IPv4 (``False``). .. note:: Python will will inherently bind to an ephemeral port on all interfaces (`0.0.0.0`) for each configured client. This is due to the underlying Sockets API in the operating system/kernel. It is safe to block incoming traffic on your firewall if you wish. TCP Clients ----------- :ref:`TCP-based clients ` have an additional ``timeout`` argument, which defaults to ``None``, and is passed to `settimeout `_. UnixSocket Clients ------------------ :ref:`UnixSocket-based clients ` have a single required ``socket_path`` argument instead of ``host`` and ``port``. In Django ========= If you are using Statsd in a Django_ application, you can configure a default :py:class:`StatsClient` in the Django settings. All of these settings are optional. 
Here are the settings and their defaults: .. code-block:: python STATSD_HOST = 'localhost' STATSD_PORT = 8125 STATSD_PREFIX = None STATSD_MAXUDPSIZE = 512 STATSD_IPV6 = False You can use the default :py:class:`StatsClient` simply: .. code-block:: python from statsd.defaults.django import statsd statsd.incr('foo') From the Environment ==================== StatsD isn't only useful in Django or on the web. A default instance can also be configured via environment variables. Here are the environment variables and their defaults: .. code-block:: bash STATSD_HOST=localhost STATSD_PORT=8125 STATSD_PREFIX=None STATSD_MAXUDPSIZE=512 STATSD_IPV6=0 and then in your Python application, you can simply do: .. code-block:: python from statsd.defaults.env import statsd statsd.incr('foo') .. note:: As of version 3.0, this default instance is always available, configured with the default values, unless overridden by the environment. .. _statsd: https://github.com/etsy/statsd .. _Django: https://www.djangoproject.com/ pystatsd-3.3/docs/contributing.rst000066400000000000000000000050461333727011600174330ustar00rootroot00000000000000.. _contributing-chapter: ============ Contributing ============ I happily accept patches if they make sense for the project and work well. If you aren't sure if I'll merge a patch upstream, please open an issue_ and describe it. Patches should meet the following criteria before I'll merge them: * All existing tests must pass. * Bugfixes and new features must include new tests or asserts. * Must not introduce any PEP8 or PyFlakes violations. I recommend doing all development in a virtualenv_, though this is really up to you. It would be great if new or changed features had documentation and included updates to the ``CHANGES`` file, but it's not totally necessary. Running Tests ============= To run the tests, you just need ``nose`` and ``mock``. 
These can be installed with ``pip``:: $ mkvirtualenv statsd $ pip install -r requirements.txt $ nosetests You can also run the tests with tox:: $ tox Tox will run the tests in Pythons 2.5, 2.6, 2.7, 3.2, 3.3, 3.4, and PyPy, if they're available. Writing Tests ============= New features or bug fixes should include tests that fail without the relevant code changes and pass with them. For example, if there is a bug in the ``StatsClient._send`` method, a new test should demonstrate the incorrect behavior by failing, and the associated changes should fix it. The failure can be a FAILURE or an ERROR. Tests and the code to fix them should be in the same commit. Bisecting should not stumble over any otherwise known failures. .. note:: Pull requests that only contain tests to demonstrate bugs are welcome, but they will be squashed with code changes to fix them. PEP8 and PyFlakes ================= The development requirements (``requirements.txt``) include the ``flake8`` tool. It is easy to run:: $ flake8 statsd/ ``flake8`` should not raise any issues or warnings. .. note:: The docs directory includes a Sphinx-generated conf.py that has several violations. That's fine, don't worry about it. Documentation ============= The documentation lives in the ``docs/`` directory and is automatically built and pushed to ReadTheDocs_. If you change or add a feature, and want to update the docs, that would be great. New features may need a new chapter. You can follow the examples already there, and be sure to add a reference to ``docs/index.rst``. Changes or very small additions may just need a new heading in an existing chapter. .. _issue: https://github.com/jsocol/pystatsd/issues .. _virtualenv: http://www.virtualenv.org/ .. _ReadTheDocs: https://statsd.readthedocs.io/ pystatsd-3.3/docs/index.rst000066400000000000000000000041561333727011600160340ustar00rootroot00000000000000.. Python StatsD documentation master file, created by sphinx-quickstart on Mon Apr 9 15:47:23 2012. 
You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Welcome to Python StatsD's documentation! ========================================= statsd_ is a friendly front-end to Graphite_. This is a Python client for the statsd daemon. .. image:: https://travis-ci.org/jsocol/pystatsd.png?branch=master :target: https://travis-ci.org/jsocol/pystatsd :alt: Travis-CI build status .. image:: https://pypip.in/v/statsd/badge.png :target: https://pypi.python.org/pypi/statsd/ :alt: Latest release .. image:: https://pypip.in/d/statsd/badge.png :target: https://pypi.python.org/pypi/statsd/ :alt: Downloads :Code: https://github.com/jsocol/pystatsd :License: MIT; see LICENSE file :Issues: https://github.com/jsocol/pystatsd/issues :Documentation: https://statsd.readthedocs.io/ Quickly, to use: .. code-block:: pycon >>> import statsd >>> c = statsd.StatsClient('localhost', 8125) >>> c.incr('foo') # Increment the 'foo' counter. >>> c.timing('stats.timed', 320) # Record a 320ms 'stats.timed'. You can also add a prefix to all your stats: .. code-block:: pycon >>> import statsd >>> c = statsd.StatsClient('localhost', 8125, prefix='foo') >>> c.incr('bar') # Will be 'foo.bar' in statsd/graphite. Installing ---------- The easiest way to install statsd is with pip! You can install from PyPI: .. code-block:: bash $ pip install statsd Or GitHub: .. code-block:: bash $ pip install -e git+https://github.com/jsocol/pystatsd#egg=statsd Or from source: .. code-block:: bash $ git clone https://github.com/jsocol/pystatsd $ cd statsd $ python setup.py install Contents -------- .. toctree:: :maxdepth: 2 configure.rst types.rst timing.rst pipeline.rst tcp.rst unix_socket.rst reference.rst contributing.rst Indices and tables ------------------ * :ref:`search` .. _statsd: https://github.com/etsy/statsd .. 
_Graphite: https://graphite.readthedocs.io/ pystatsd-3.3/docs/make.bat000066400000000000000000000117641333727011600156030ustar00rootroot00000000000000@ECHO OFF REM Command file for Sphinx documentation if "%SPHINXBUILD%" == "" ( set SPHINXBUILD=sphinx-build ) set BUILDDIR=_build set ALLSPHINXOPTS=-d %BUILDDIR%/doctrees %SPHINXOPTS% . set I18NSPHINXOPTS=%SPHINXOPTS% . if NOT "%PAPER%" == "" ( set ALLSPHINXOPTS=-D latex_paper_size=%PAPER% %ALLSPHINXOPTS% set I18NSPHINXOPTS=-D latex_paper_size=%PAPER% %I18NSPHINXOPTS% ) if "%1" == "" goto help if "%1" == "help" ( :help echo.Please use `make ^` where ^ is one of echo. html to make standalone HTML files echo. dirhtml to make HTML files named index.html in directories echo. singlehtml to make a single large HTML file echo. pickle to make pickle files echo. json to make JSON files echo. htmlhelp to make HTML files and a HTML help project echo. qthelp to make HTML files and a qthelp project echo. devhelp to make HTML files and a Devhelp project echo. epub to make an epub echo. latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter echo. text to make text files echo. man to make manual pages echo. texinfo to make Texinfo files echo. gettext to make PO message catalogs echo. changes to make an overview over all changed/added/deprecated items echo. linkcheck to check all external links for integrity echo. doctest to run all doctests embedded in the documentation if enabled goto end ) if "%1" == "clean" ( for /d %%i in (%BUILDDIR%\*) do rmdir /q /s %%i del /q /s %BUILDDIR%\* goto end ) if "%1" == "html" ( %SPHINXBUILD% -b html %ALLSPHINXOPTS% %BUILDDIR%/html if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/html. goto end ) if "%1" == "dirhtml" ( %SPHINXBUILD% -b dirhtml %ALLSPHINXOPTS% %BUILDDIR%/dirhtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/dirhtml. 
goto end ) if "%1" == "singlehtml" ( %SPHINXBUILD% -b singlehtml %ALLSPHINXOPTS% %BUILDDIR%/singlehtml if errorlevel 1 exit /b 1 echo. echo.Build finished. The HTML pages are in %BUILDDIR%/singlehtml. goto end ) if "%1" == "pickle" ( %SPHINXBUILD% -b pickle %ALLSPHINXOPTS% %BUILDDIR%/pickle if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the pickle files. goto end ) if "%1" == "json" ( %SPHINXBUILD% -b json %ALLSPHINXOPTS% %BUILDDIR%/json if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can process the JSON files. goto end ) if "%1" == "htmlhelp" ( %SPHINXBUILD% -b htmlhelp %ALLSPHINXOPTS% %BUILDDIR%/htmlhelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run HTML Help Workshop with the ^ .hhp project file in %BUILDDIR%/htmlhelp. goto end ) if "%1" == "qthelp" ( %SPHINXBUILD% -b qthelp %ALLSPHINXOPTS% %BUILDDIR%/qthelp if errorlevel 1 exit /b 1 echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: echo.^> qcollectiongenerator %BUILDDIR%\qthelp\PythonStatsD.qhcp echo.To view the help file: echo.^> assistant -collectionFile %BUILDDIR%\qthelp\PythonStatsD.ghc goto end ) if "%1" == "devhelp" ( %SPHINXBUILD% -b devhelp %ALLSPHINXOPTS% %BUILDDIR%/devhelp if errorlevel 1 exit /b 1 echo. echo.Build finished. goto end ) if "%1" == "epub" ( %SPHINXBUILD% -b epub %ALLSPHINXOPTS% %BUILDDIR%/epub if errorlevel 1 exit /b 1 echo. echo.Build finished. The epub file is in %BUILDDIR%/epub. goto end ) if "%1" == "latex" ( %SPHINXBUILD% -b latex %ALLSPHINXOPTS% %BUILDDIR%/latex if errorlevel 1 exit /b 1 echo. echo.Build finished; the LaTeX files are in %BUILDDIR%/latex. goto end ) if "%1" == "text" ( %SPHINXBUILD% -b text %ALLSPHINXOPTS% %BUILDDIR%/text if errorlevel 1 exit /b 1 echo. echo.Build finished. The text files are in %BUILDDIR%/text. goto end ) if "%1" == "man" ( %SPHINXBUILD% -b man %ALLSPHINXOPTS% %BUILDDIR%/man if errorlevel 1 exit /b 1 echo. 
echo.Build finished. The manual pages are in %BUILDDIR%/man. goto end ) if "%1" == "texinfo" ( %SPHINXBUILD% -b texinfo %ALLSPHINXOPTS% %BUILDDIR%/texinfo if errorlevel 1 exit /b 1 echo. echo.Build finished. The Texinfo files are in %BUILDDIR%/texinfo. goto end ) if "%1" == "gettext" ( %SPHINXBUILD% -b gettext %I18NSPHINXOPTS% %BUILDDIR%/locale if errorlevel 1 exit /b 1 echo. echo.Build finished. The message catalogs are in %BUILDDIR%/locale. goto end ) if "%1" == "changes" ( %SPHINXBUILD% -b changes %ALLSPHINXOPTS% %BUILDDIR%/changes if errorlevel 1 exit /b 1 echo. echo.The overview file is in %BUILDDIR%/changes. goto end ) if "%1" == "linkcheck" ( %SPHINXBUILD% -b linkcheck %ALLSPHINXOPTS% %BUILDDIR%/linkcheck if errorlevel 1 exit /b 1 echo. echo.Link check complete; look for any errors in the above output ^ or in %BUILDDIR%/linkcheck/output.txt. goto end ) if "%1" == "doctest" ( %SPHINXBUILD% -b doctest %ALLSPHINXOPTS% %BUILDDIR%/doctest if errorlevel 1 exit /b 1 echo. echo.Testing of doctests in the sources finished, look at the ^ results in %BUILDDIR%/doctest/output.txt. goto end ) :end pystatsd-3.3/docs/pipeline.rst000066400000000000000000000025631333727011600165320ustar00rootroot00000000000000.. _pipeline-chapter: ========= Pipelines ========= The :py:class:`Pipeline` class is a subclass of :py:class:`StatsClient` that batches together several stats before sending. It implements the entire client interface, plus a :py:meth:`send() ` method. :py:class:`Pipeline` objects should be created with :py:meth:`StatsClient.pipeline()`: .. code-block:: python client = StatsClient() pipe = client.pipeline() pipe.incr('foo') pipe.decr('bar') pipe.timing('baz', 520) pipe.send() No stats will be sent until :py:meth:`send() ` is called, at which point they will be packed into as few UDP packets as possible. As a Context Manager ==================== :py:class:`Pipeline` objects can also be used as context managers: .. 
code-block:: python with StatsClient().pipeline() as pipe: pipe.incr('foo') pipe.decr('bar') :py:meth:`Pipeline.send()` will be called automatically when the managed block exits. Thread Safety ============= While :py:class:`StatsClient` instances are considered thread-safe (or at least as thread-safe as the standard library's ``socket.send`` is), :py:class:`Pipeline` instances **are not thread-safe**. Storing stats for later creates at least two important race conditions in a multi-threaded environment. You should create one :py:class:`Pipeline` per-thread, if necessary. pystatsd-3.3/docs/reference.rst000066400000000000000000000246061333727011600166650ustar00rootroot00000000000000.. _reference-chapter: ============= API Reference ============= The ``StatsClient`` provides accessors for all the types of data the statsd_ server supports. .. note:: Each public stats API method supports a ``rate`` parameter, but statsd doesn't always use it the same way. See the :ref:`types-chapter` for more information. .. py:class:: StatsClient(host='localhost', port=8125, prefix=None, maxudpsize=512) Create a new ``StatsClient`` instance with the appropriate connection and prefix information. :param str host: the hostname or IP address of the statsd_ server :param int port: the port of the statsd server :param prefix: a prefix to distinguish and group stats from an application or environment :type prefix: str or None :param int maxudpsize: the largest safe UDP packet to send. 512 is generally considered safe for the public internet, but private networks may support larger packet sizes. .. py:method:: StatsClient.incr(stat, count=1, rate=1) Increment a :ref:`counter `. :param str stat: the name of the counter to increment :param int count: the amount to increment by. Typically an integer. May be negative, but see also :py:meth:`decr() `. :param float rate: a sample rate, a float between 0 and 1. Will only send data this percentage of the time. 
:param str stat: the name of the counter to decrement
:param int count: the amount to decrement by. Typically an integer. May
  be negative, which will increment the counter instead; see also
  :py:meth:`incr() <StatsClient.incr>`.
:py:class:`datetime.timedelta` objects will be converted to milliseconds :type delta: int or float or datetime.timedelta :param float rate: a sample rate, a float between 0 and 1. Will only send data this percentage of the time. The statsd server does *not* take the sample rate into account for timers. .. py:method:: StatsClient.timer(stat, rate=1) Return a :py:class:`Timer` object that can be used as a context manager or decorator to automatically record timing for a block or function call. See also the :ref:`chapter on timing `. :param str stat: the name of the timer to use :param float rate: a sample rate, a float between 0 and 1. Will only send data this percentage of the time. The statsd server does *not* take the sample rate into account for timers. .. code-block:: python with StatsClient().timer(stat, rate=1): pass # or @StatsClient().timer(stat, rate=1) def foo(): pass # or (see below for more Timer methods) timer = StatsClient().timer('foo', rate=1) with timer: pass @timer def bar(): pass .. py:method:: StatsClient.pipeline() Returns a :py:class:`Pipeline` object for collecting several stats. Can also be used as a context manager. .. code-block:: python pipe = StatsClient().pipeline() pipe.incr('foo') pipe.send() # or with StatsClient().pipeline as pipe: pipe.incr('bar') .. py:class:: Timer() The :ref:`Timer objects ` returned by :py:meth:`StatsClient.timer()`. These should never be instantiated directly. :py:class:`Timer` objects should not be shared between threads (except when used as decorators, which is thread-safe) but could be used within another context manager or decorator. For example: .. code-block:: python @contextmanager def my_context(): timer = statsd.timer('my_context_timer') timer.start() try: yield finally: timer.stop() :py:class:`Timer` objects may be reused by calling :py:meth:`start() ` again. .. py:method:: Timer.start() Causes a timer object to start counting. 
Causes the timer object to stop timing and send the results to statsd_.
Can be called with ``send=False`` to prevent sending the results
immediately; use :py:meth:`send() <Timer.send>` later instead.
Closes a connection that's currently open and deletes its socket. If this
is called on a :py:class:`TCPStatsClient` which currently has no open
connection it is a non-action.
py:class:: UnixSocketStatsClient(socket_path, prefix=None, timeout=None) A version of :py:class:`StatsClient` that communicates over Unix sockets. It implements all methods of :py:class:`StatsClient`. :param str socket_path: the path to the (writeable) Unix socket :param prefix: a prefix to distinguish and group stats from an application or environment :type prefix: str or None :param float timeout: socket timeout for any actions on the connection socket. .. _statsd: https://github.com/etsy/statsd pystatsd-3.3/docs/tags.rst000066400000000000000000000070261333727011600156620ustar00rootroot00000000000000.. _tags-chapter: ============================ Unsupported: Tagging Metrics ============================ Tagged metrics—such as those used by Datadog_ and `Telegraf`_—are explicitly outside the scope of this library. Alternatives_ exist and are recommended. This document lays out the reasons to avoid support for tags. Aggregating and Disaggregating Metrics ====================================== Given a simple metric, like a :ref:`counter ` or :ref:`timer-type`, the very first operation StatsD will perform is an aggregation over time. For example, over a 30-second window, calculate the total number of events (a counter) or several aggregations like average, median, 90th percentile (a timer). A very common next step is for users to want to perform additional aggregations. For example, if we're timing a ``/widgets`` API endpoint for both ``GET`` and ``POST`` requests, we might want to know the median time across both HTTP methods. *Without* tags, we must start with the most disaggregated metrics, e.g.:: statsd.timing('api.widgets.GET', response_time) statsd.timing('api.widgets.POST', response_time) We can then *aggregate* these metrics with wildcards (e.g. 
in Graphite):: weightedAverage(api.widgets.*.mean, api.widgets.*.count) However, *with* tags, we have an alternative approach: to use a single, aggregated metric name, and *disaggregate* via tags, e.g.:: statsd.timing('api.widgets', response_time, {'method': 'GET'}) statsd.timing('api.widgets', response_time, {'method': 'POST'}) By default, queries for the ``api.widgets`` timer will include all requests, but may be filtered to specific subsets with tags (e.g. in Datadog):: api.widgets.mean{method:GET} Naming Metrics ============== The examples above demonstrate that there is a fundamental change in how metrics must be named, particularly in the absence of tags, to avoid data loss. If tags are not supported, there is no way to disaggregate ``api.widgets`` into its ``GET`` and ``POST`` subsets. Thus, it is incredibly important that an application be written with specific metrics capabilities in mind. If using a metrics system that does not support tags, like StatsD_ or StatsDaemon_, metric names must be disaggregated by default. If using a system that *does* support tags, like Datadog or Telegraf, metric names may be aggregated by default. If an application is expecting tags to work but they are not supported by the underlying metrics system, the best case scenario is a loss of data resolution. The worst case scenario is a complete loss of data, if the metrics system is incapable of correctly parsing the extended metric data. Explicit Opt-in =============== Given that the best case scenario for a mismatch of application and metrics system is a form of data loss, the choice to use metrics with tags must be incredibly explicit. Technically, this library is capable of sending metrics to Datadog_ and Telegraf_, as well as StatsD_. However, to take advantage of these, you'll need to change your strategy for naming—and tagging—metrics. To avoid silently failing, this library forces you to make an explicit change to how you send metrics to these systems. 
At a minimum, you must touch every file that has ``import statsd``, but that's not really enough: you need to touch every metrics call. .. _Datadog: https://www.datadoghq.com/ .. _Telegraf: https://github.com/influxdata/telegraf .. _Alternatives: https://pypi.org/project/statsd-tags/ .. _StatsD: https://github.com/etsy/statsd .. _StatsDaemon: https://github.com/bitly/statsdaemon pystatsd-3.3/docs/tcp.rst000066400000000000000000000014241333727011600155060ustar00rootroot00000000000000.. _tcp-chapter: ============== TCPStatsClient ============== .. code-block:: python statsd = TCPStatsClient(host='1.2.3.4', port=8126, timeout=1.) The :py:class:`TCPStatsClient` class has a very similar interface to :py:class:`StatsClient`, but internally it uses TCP connections instead of UDP. These are the main differences when using ``TCPStatsClient`` compared to ``StatsClient``: * The constructor supports a ``timeout`` parameter to set a timeout on all socket actions. * :py:meth:`connect() ` and all methods that send data can potentially raise socket exceptions. * **It is not thread-safe**, so it is recommended to not share it across threads unless a lot of attention is paid to make sure that no two threads ever use it at once. pystatsd-3.3/docs/timing.rst000066400000000000000000000100121333727011600162000ustar00rootroot00000000000000.. _timing-chapter: ============ Using Timers ============ :ref:`Timers ` are an incredibly powerful tool for tracking application performance. Statsd provides a number of ways to use them to instrument your code. There are four ways to use timers. Calling ``timing`` manually =========================== The simplest way to use a timer is to record the time yourself and send it manually, using the :py:meth:`StatsClient.timing()` method: .. 
code-block:: python import time from datetime import datetime from statsd import StatsClient statsd = StatsClient() # Pass milliseconds directly start = time.time() time.sleep(3) # You must convert to milliseconds: dt = int((time.time() - start) * 1000) statsd.timing('slept', dt) # Or pass a timedelta start = datetime.utcnow() time.sleep(3) dt = datetime.utcnow() - start statsd.timing('slept', dt) .. _timer-context-manager: Using a context manager ======================= The :py:meth:`StatsClient.timer()` method will return a :py:class:`Timer` object that can be used as both a context manager and a thread-safe decorator. When used as a context manager, it will automatically report the time taken for the inner block: .. code-block:: python from statsd import StatsClient statsd = StatsClient() with statsd.timer('foo'): # This block will be timed. for i in xrange(0, 100000): i ** 2 # The timing is sent immediately when the managed block exits. .. _timer-decorator: Using a decorator ================= :py:class:`Timer` objects can be used to decorate a method in a thread-safe manner. Every time the decorated function is called, the time it took to execute will be sent to the statsd server. .. code-block:: python from statsd import StatsClient statsd = StatsClient() @statsd.timer('myfunc') def myfunc(a, b): """Calculate the most complicated thing a and b can do.""" # Timing information will be sent every time the function is called. myfunc(1, 2) myfunc(3, 7) .. _timer-object: Using a Timer object directly ============================= .. versionadded:: 2.1 :py:class:`Timer` objects function as context managers and as decorators, but they can also be used directly. (Flat is, after all, better than nested.) .. code-block:: python from statsd import StatsClient statsd = StatsClient() foo_timer = statsd.timer('foo') foo_timer.start() # Do something fun. foo_timer.stop() When :py:meth:`Timer.stop()` is called, a :ref:`timing stat ` will automatically be sent to StatsD. 
You can override this behavior with the ``send=False`` keyword argument
to :py:meth:`stop() <Timer.stop>`:
That is, when looking at a graph, you are usually seeing the average number of events per second during a one-minute period. The statsd server collects counters under the ``stats`` prefix. Counters are managed with the :py:meth:`StatsClient.incr()` and :py:meth:`StatsClient.decr()` methods: .. code-block:: python from statsd import StatsClient statsd = StatsClient() statsd.incr('some.event') You can increment a counter by more than one by passing a second parameter: .. code-block:: python statsd.incr('some.other.event', 10) You can also use the ``rate`` parameter to produce sampled data. The statsd server will take the sample rate into account, and the :py:class:`StatsClient` will only send data ``rate`` percent of the time. This can help the statsd server stay responsive with extremely busy applications. ``rate`` is a float between 0 and 1: .. code-block:: python # Increment this counter 10% of the time. statsd.incr('some.third.event', rate=0.1) Because the statsd server is aware of the sampling, it will still show you the true average rate per second. You can also decrement counters. The :py:meth:`StatsClient.decr()` method takes the same arguments as ``incr``: .. code-block:: python statsd.decr('some.other.event') # Decrease the counter by 5, 15% sample. statsd.decr('some.third.event', 5, rate=0.15) .. _timer-type: Timers ====== *Timers* are meant to track how long something took. They are an invaluable tool for tracking application performance. The statsd server collects all timers under the ``stats.timers`` prefix, and will calculate the lower bound, mean, 90th percentile, upper bound, and count of each timer for each period (by the time you see it in Graphite, that's usually per minute). * The *lower bound* is the lowest value statsd saw for that stat during that time period. * The *mean* is the average of all values statsd saw for that stat during that time period. 
* The *90th percentile* is a value *x* such that 90% of all the values statsd saw for that stat during that time period are below *x*, and 10% are above. This is a great number to try to optimize. * The *upper bound* is the highest value statsd saw for that stat during that time period. * The *count* is the number of timings statsd saw for that stat during that time period. It is not averaged. The statsd server only operates in millisecond timings. Everything should be converted to milliseconds. The ``rate`` parameter will sample the data being sent to the statsd server, but in this case it doesn't make sense for the statsd server to take it into account (except possibly for the *count* value, but then it would be lying about how much data it averaged). See the :ref:`timing documentation ` for more detail on using timers with Statsd. .. _gauge-type: Gauges ====== *Gauges* are a constant data type. They are not subject to averaging, and they don't change unless you change them. That is, once you set a gauge value, it will be a flat line on the graph until you change it again. Gauges are useful for things that are already averaged, or don't need to reset periodically. System load, for example, could be graphed with a gauge. You might use :py:meth:`StatsClient.incr` to count the number of logins to a system, but a gauge to track how many active WebSocket connections you have. The statsd server collects gauges under the ``stats.gauges`` prefix. The :py:meth:`StatsClient.gauge` method also support the ``rate`` parameter to sample data back to the statsd server, but use it with care, especially with gauges that may not be updated very often. Gauge Deltas ------------ Gauges may be *updated* (as opposed to *set*) by setting the ``delta`` keyword argument to ``True``. For example: .. code-block:: python statsd.gauge('foo', 70) # Set the 'foo' gauge to 70. statsd.gauge('foo', 1, delta=True) # Set 'foo' to 71. statsd.gauge('foo', -3, delta=True) # Set 'foo' to 68. .. 
note:: Support for gauge deltas was added to the server in 0.6.0. .. _set-type: Sets ==== *Sets* count the number of unique values passed to a key. For example, you could count the number of users accessing your system using: .. code-block:: python statsd.set('users', userid) If :py:meth:`StatsClient.set()` is called multiple times with the same userid in the same sample period, that userid will only be counted once. .. _statsd: https://github.com/etsy/statsd .. _Graphite: https://graphite.readthedocs.io pystatsd-3.3/docs/unix_socket.rst000066400000000000000000000012151333727011600172510ustar00rootroot00000000000000.. _unix-socket-chapter: ===================== UnixSocketStatsClient ===================== .. code-block:: python statsd = UnixSocketStatsClient(socket_path='/var/run/stats.sock') The :py:class:`UnixSocketStatsClient` class has a very similar interface to :py:class:`TCPStatsClient`, but internally it uses Unix Domain sockets instead of TCP. These are the main differences when using ``UnixSocketStatsClient`` compared to ``StatsClient``: * The ``socket_path`` parameter is required. It has no default. * The ``host``, ``port`` and ``ipv6`` parameters are not allowed. * The application process must have permission to write to the socket. 
from __future__ import absolute_import

from .client import StatsClient
from .client import TCPStatsClient
from .client import UnixSocketStatsClient

# Package version. Keep in sync with setup.py (version='3.3.0') and the
# CHANGES file ("Version 3.3"); the previous value (3, 2, 1) made
# statsd.__version__ report the wrong release.
VERSION = (3, 3, 0)
__version__ = '.'.join(map(str, VERSION))

# Public API of the package.
__all__ = ['StatsClient', 'TCPStatsClient', 'UnixSocketStatsClient']
class StatsClientBase(object):
    """Shared implementation for the statsd client variants.

    Concrete subclasses only need to provide ``_send`` (raw wire output)
    and ``pipeline`` (batching support).
    """

    def _send(self):
        raise NotImplementedError()

    def pipeline(self):
        raise NotImplementedError()

    def timer(self, stat, rate=1):
        """Return a Timer usable as a context manager or decorator."""
        return Timer(self, stat, rate)

    def timing(self, stat, delta, rate=1):
        """Send new timing information.

        `delta` can be either a number of milliseconds or a timedelta.
        """
        # A timedelta is converted to a (possibly fractional) number of
        # milliseconds before formatting.
        ms = delta.total_seconds() * 1000. if isinstance(delta, timedelta) else delta
        self._send_stat(stat, '%0.6f|ms' % ms, rate)

    def incr(self, stat, count=1, rate=1):
        """Increment a stat by `count`."""
        self._send_stat(stat, '%s|c' % count, rate)

    def decr(self, stat, count=1, rate=1):
        """Decrement a stat by `count`."""
        self.incr(stat, -count, rate)

    def gauge(self, stat, value, rate=1, delta=False):
        """Set a gauge value."""
        if delta or value >= 0:
            sign = '+' if delta and value >= 0 else ''
            self._send_stat(stat, '%s%s|g' % (sign, value), rate)
        else:
            # Absolute negative gauge: the wire format reads a leading '-'
            # as a delta, so zero the gauge first and ship both updates
            # together through a pipeline. Sampling is applied here once
            # so the pair is kept atomic.
            if rate < 1 and random.random() > rate:
                return
            with self.pipeline() as pipe:
                pipe._send_stat(stat, '0|g', 1)
                pipe._send_stat(stat, '%s|g' % value, 1)

    def set(self, stat, value, rate=1):
        """Set a set value."""
        self._send_stat(stat, '%s|s' % value, rate)

    def _send_stat(self, stat, value, rate):
        self._after(self._prepare(stat, value, rate))

    def _prepare(self, stat, value, rate):
        # Client-side sampling: drop the stat (return None) most of the
        # time, and annotate surviving samples with '@rate' so the server
        # can scale them back up.
        if rate < 1:
            if random.random() > rate:
                return None
            value = '%s|@%s' % (value, rate)
        name = '%s.%s' % (self._prefix, stat) if self._prefix else stat
        return '%s:%s' % (name, value)

    def _after(self, data):
        if data:
            self._send(data)


class PipelineBase(StatsClientBase):
    """A statsd client that queues stats until ``send()`` is called."""

    def __init__(self, client):
        self._client = client
        self._prefix = client._prefix
        self._stats = deque()

    def _send(self):
        raise NotImplementedError()

    def _after(self, data):
        # Unlike the base class, buffer the prepared stat instead of
        # sending it immediately.
        if data is not None:
            self._stats.append(data)

    def __enter__(self):
        return self

    def __exit__(self, typ, value, tb):
        self.send()

    def send(self):
        # Nothing buffered means nothing to do.
        if not self._stats:
            return
        self._send()

    def pipeline(self):
        return self.__class__(self)
class TCPStatsClient(StreamClientBase):
    """TCP version of StatsClient."""

    def __init__(self, host='localhost', port=8125, prefix=None,
                 timeout=None, ipv6=False):
        """Create a new client; no connection is opened until needed."""
        self._prefix = prefix
        self._host = host
        self._port = port
        self._timeout = timeout
        self._ipv6 = ipv6
        self._sock = None

    def connect(self):
        """Resolve the target address and open a TCP connection to it."""
        fam = socket.AF_INET6 if self._ipv6 else socket.AF_INET
        resolved = socket.getaddrinfo(
            self._host, self._port, fam, socket.SOCK_STREAM)
        family, _, _, _, addr = resolved[0]
        self._sock = socket.socket(family, socket.SOCK_STREAM)
        self._sock.settimeout(self._timeout)
        self._sock.connect(addr)


class UnixSocketStatsClient(StreamClientBase):
    """Unix domain socket version of StatsClient."""

    def __init__(self, socket_path, prefix=None, timeout=None):
        """Create a new client bound to a filesystem socket path."""
        self._prefix = prefix
        self._socket_path = socket_path
        self._timeout = timeout
        self._sock = None

    def connect(self):
        """Open a stream connection to the configured Unix socket path."""
        self._sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self._sock.settimeout(self._timeout)
        self._sock.connect(self._socket_path)
try: # perf_counter is only present on Py3.3+ from time import perf_counter as time_now except ImportError: # fall back to using time from time import time as time_now def safe_wraps(wrapper, *args, **kwargs): """Safely wraps partial functions.""" while isinstance(wrapper, functools.partial): wrapper = wrapper.func return functools.wraps(wrapper, *args, **kwargs) class Timer(object): """A context manager/decorator for statsd.timing().""" def __init__(self, client, stat, rate=1): self.client = client self.stat = stat self.rate = rate self.ms = None self._sent = False self._start_time = None def __call__(self, f): """Thread-safe timing function decorator.""" @safe_wraps(f) def _wrapped(*args, **kwargs): start_time = time_now() try: return f(*args, **kwargs) finally: elapsed_time_ms = 1000.0 * (time_now() - start_time) self.client.timing(self.stat, elapsed_time_ms, self.rate) return _wrapped def __enter__(self): return self.start() def __exit__(self, typ, value, tb): self.stop() def start(self): self.ms = None self._sent = False self._start_time = time_now() return self def stop(self, send=True): if self._start_time is None: raise RuntimeError('Timer has not started.') dt = time_now() - self._start_time self.ms = 1000.0 * dt # Convert to milliseconds. if send: self.send() return self def send(self): if self.ms is None: raise RuntimeError('No data recorded.') if self._sent: raise RuntimeError('Already sent data.') self._sent = True self.client.timing(self.stat, self.ms, self.rate) pystatsd-3.3/statsd/client/udp.py000066400000000000000000000027731333727011600171700ustar00rootroot00000000000000from __future__ import absolute_import, division, unicode_literals import socket from .base import StatsClientBase, PipelineBase class Pipeline(PipelineBase): def __init__(self, client): super(Pipeline, self).__init__(client) self._maxudpsize = client._maxudpsize def _send(self): data = self._stats.popleft() while self._stats: # Use popleft to preserve the order of the stats. 
stat = self._stats.popleft() if len(stat) + len(data) + 1 >= self._maxudpsize: self._client._after(data) data = stat else: data += '\n' + stat self._client._after(data) class StatsClient(StatsClientBase): """A client for statsd.""" def __init__(self, host='localhost', port=8125, prefix=None, maxudpsize=512, ipv6=False): """Create a new client.""" fam = socket.AF_INET6 if ipv6 else socket.AF_INET family, _, _, _, addr = socket.getaddrinfo( host, port, fam, socket.SOCK_DGRAM)[0] self._addr = addr self._sock = socket.socket(family, socket.SOCK_DGRAM) self._prefix = prefix self._maxudpsize = maxudpsize def _send(self, data): """Send data to statsd.""" try: self._sock.sendto(data.encode('ascii'), self._addr) except (socket.error, RuntimeError): # No time for love, Dr. Jones! pass def pipeline(self): return Pipeline(self) pystatsd-3.3/statsd/defaults/000077500000000000000000000000001333727011600163465ustar00rootroot00000000000000pystatsd-3.3/statsd/defaults/__init__.py000066400000000000000000000001131333727011600204520ustar00rootroot00000000000000HOST = 'localhost' PORT = 8125 IPV6 = False PREFIX = None MAXUDPSIZE = 512 pystatsd-3.3/statsd/defaults/django.py000066400000000000000000000011471333727011600201650ustar00rootroot00000000000000from __future__ import absolute_import from django.conf import settings from statsd import defaults from statsd.client import StatsClient statsd = None if statsd is None: host = getattr(settings, 'STATSD_HOST', defaults.HOST) port = getattr(settings, 'STATSD_PORT', defaults.PORT) prefix = getattr(settings, 'STATSD_PREFIX', defaults.PREFIX) maxudpsize = getattr(settings, 'STATSD_MAXUDPSIZE', defaults.MAXUDPSIZE) ipv6 = getattr(settings, 'STATSD_IPV6', defaults.IPV6) statsd = StatsClient(host=host, port=port, prefix=prefix, maxudpsize=maxudpsize, ipv6=ipv6) pystatsd-3.3/statsd/defaults/env.py000066400000000000000000000010751333727011600175130ustar00rootroot00000000000000from __future__ import absolute_import import os from statsd import 
defaults from statsd.client import StatsClient statsd = None if statsd is None: host = os.getenv('STATSD_HOST', defaults.HOST) port = int(os.getenv('STATSD_PORT', defaults.PORT)) prefix = os.getenv('STATSD_PREFIX', defaults.PREFIX) maxudpsize = int(os.getenv('STATSD_MAXUDPSIZE', defaults.MAXUDPSIZE)) ipv6 = bool(int(os.getenv('STATSD_IPV6', defaults.IPV6))) statsd = StatsClient(host=host, port=port, prefix=prefix, maxudpsize=maxudpsize, ipv6=ipv6) pystatsd-3.3/statsd/tests.py000066400000000000000000000644751333727011600162730ustar00rootroot00000000000000from __future__ import with_statement import functools import random import re import socket from datetime import timedelta from unittest import SkipTest import mock from nose.tools import eq_ from statsd import StatsClient from statsd import TCPStatsClient from statsd import UnixSocketStatsClient ADDR = (socket.gethostbyname('localhost'), 8125) UNIX_SOCKET = 'tmp.socket' # proto specific methods to get the socket method to send data send_method = { 'udp': lambda x: x.sendto, 'tcp': lambda x: x.sendall, 'unix': lambda x: x.sendall, } # proto specific methods to create the expected value make_val = { 'udp': lambda x, addr: mock.call(str.encode(x), addr), 'tcp': lambda x, addr: mock.call(str.encode(x + '\n')), 'unix': lambda x, addr: mock.call(str.encode(x + '\n')), } def _udp_client(prefix=None, addr=None, port=None, ipv6=False): if not addr: addr = ADDR[0] if not port: port = ADDR[1] sc = StatsClient(host=addr, port=port, prefix=prefix, ipv6=ipv6) sc._sock = mock.Mock() return sc def _tcp_client(prefix=None, addr=None, port=None, timeout=None, ipv6=False): if not addr: addr = ADDR[0] if not port: port = ADDR[1] sc = TCPStatsClient(host=addr, port=port, prefix=prefix, timeout=timeout, ipv6=ipv6) sc._sock = mock.Mock() return sc def _unix_socket_client(prefix=None, socket_path=None): if not socket_path: socket_path = UNIX_SOCKET sc = UnixSocketStatsClient(socket_path=socket_path, prefix=prefix) sc._sock = mock.Mock() 
return sc def _timer_check(sock, count, proto, start, end): send = send_method[proto](sock) eq_(send.call_count, count) value = send.call_args[0][0].decode('ascii') exp = re.compile('^%s:\d+|%s$' % (start, end)) assert exp.match(value) def _sock_check(sock, count, proto, val=None, addr=None): send = send_method[proto](sock) eq_(send.call_count, count) if not addr: addr = ADDR if val is not None: eq_( send.call_args, make_val[proto](val, addr), ) class assert_raises(object): """A context manager that asserts a given exception was raised. >>> with assert_raises(TypeError): ... raise TypeError >>> with assert_raises(TypeError): ... raise ValueError AssertionError: ValueError not in ['TypeError'] >>> with assert_raises(TypeError): ... pass AssertionError: No exception raised. Or you can specify any of a number of exceptions: >>> with assert_raises(TypeError, ValueError): ... raise ValueError >>> with assert_raises(TypeError, ValueError): ... raise KeyError AssertionError: KeyError not in ['TypeError', 'ValueError'] You can also get the exception back later: >>> with assert_raises(TypeError) as cm: ... raise TypeError('bad type!') >>> cm.exception TypeError('bad type!') >>> cm.exc_type TypeError >>> cm.traceback Lowercase name because that it's a class is an implementation detail. """ def __init__(self, *exc_cls): self.exc_cls = exc_cls def __enter__(self): # For access to the exception later. return self def __exit__(self, typ, value, tb): assert typ, 'No exception raised.' assert typ in self.exc_cls, '%s not in %s' % ( typ.__name__, [e.__name__ for e in self.exc_cls]) self.exc_type = typ self.exception = value self.traceback = tb # Swallow expected exceptions. 
return True def _test_incr(cl, proto): cl.incr('foo') _sock_check(cl._sock, 1, proto, val='foo:1|c') cl.incr('foo', 10) _sock_check(cl._sock, 2, proto, val='foo:10|c') cl.incr('foo', 1.2) _sock_check(cl._sock, 3, proto, val='foo:1.2|c') cl.incr('foo', 10, rate=0.5) _sock_check(cl._sock, 4, proto, val='foo:10|c|@0.5') @mock.patch.object(random, 'random', lambda: -1) def test_incr_udp(): """StatsClient.incr works.""" cl = _udp_client() _test_incr(cl, 'udp') @mock.patch.object(random, 'random', lambda: -1) def test_incr_tcp(): """TCPStatsClient.incr works.""" cl = _tcp_client() _test_incr(cl, 'tcp') @mock.patch.object(random, 'random', lambda: -1) def test_incr_unix_socket(): """TCPStatsClient.incr works.""" cl = _unix_socket_client() _test_incr(cl, 'unix') def _test_decr(cl, proto): cl.decr('foo') _sock_check(cl._sock, 1, proto, 'foo:-1|c') cl.decr('foo', 10) _sock_check(cl._sock, 2, proto, 'foo:-10|c') cl.decr('foo', 1.2) _sock_check(cl._sock, 3, proto, 'foo:-1.2|c') cl.decr('foo', 1, rate=0.5) _sock_check(cl._sock, 4, proto, 'foo:-1|c|@0.5') @mock.patch.object(random, 'random', lambda: -1) def test_decr_udp(): """StatsClient.decr works.""" cl = _udp_client() _test_decr(cl, 'udp') @mock.patch.object(random, 'random', lambda: -1) def test_decr_tcp(): """TCPStatsClient.decr works.""" cl = _tcp_client() _test_decr(cl, 'tcp') @mock.patch.object(random, 'random', lambda: -1) def test_decr_unix_socket(): """TCPStatsClient.decr works.""" cl = _unix_socket_client() _test_decr(cl, 'unix') def _test_gauge(cl, proto): cl.gauge('foo', 30) _sock_check(cl._sock, 1, proto, 'foo:30|g') cl.gauge('foo', 1.2) _sock_check(cl._sock, 2, proto, 'foo:1.2|g') cl.gauge('foo', 70, rate=0.5) _sock_check(cl._sock, 3, proto, 'foo:70|g|@0.5') @mock.patch.object(random, 'random', lambda: -1) def test_gauge_udp(): """StatsClient.gauge works.""" cl = _udp_client() _test_gauge(cl, 'udp') @mock.patch.object(random, 'random', lambda: -1) def test_gauge_tcp(): """TCPStatsClient.gauge works.""" cl = 
_tcp_client() _test_gauge(cl, 'tcp') @mock.patch.object(random, 'random', lambda: -1) def test_gauge_unix_socket(): """TCPStatsClient.decr works.""" cl = _unix_socket_client() _test_gauge(cl, 'unix') def _test_ipv6(cl, proto, addr): cl.gauge('foo', 30) _sock_check(cl._sock, 1, proto, 'foo:30|g', addr=addr) def test_ipv6_udp(): """StatsClient can use to IPv6 address.""" addr = ('::1', 8125, 0, 0) cl = _udp_client(addr=addr[0], ipv6=True) _test_ipv6(cl, 'udp', addr) def test_ipv6_tcp(): """TCPStatsClient can use to IPv6 address.""" addr = ('::1', 8125, 0, 0) cl = _tcp_client(addr=addr[0], ipv6=True) _test_ipv6(cl, 'tcp', addr) def _test_resolution(cl, proto, addr): cl.incr('foo') _sock_check(cl._sock, 1, proto, 'foo:1|c', addr=addr) def test_ipv6_resolution_udp(): raise SkipTest('IPv6 resolution is broken on Travis') cl = _udp_client(addr='localhost', ipv6=True) _test_resolution(cl, 'udp', ('::1', 8125, 0, 0)) def test_ipv6_resolution_tcp(): cl = _tcp_client(addr='localhost', ipv6=True) _test_resolution(cl, 'tcp', ('::1', 8125, 0, 0)) def test_ipv4_resolution_udp(): cl = _udp_client(addr='localhost') _test_resolution(cl, 'udp', ('127.0.0.1', 8125)) def test_ipv4_resolution_tcp(): cl = _tcp_client(addr='localhost') _test_resolution(cl, 'tcp', ('127.0.0.1', 8125)) def _test_gauge_delta(cl, proto): tests = ( (12, '+12'), (-13, '-13'), (1.2, '+1.2'), (-1.3, '-1.3'), ) def _check(num, result): cl._sock.reset_mock() cl.gauge('foo', num, delta=True) _sock_check(cl._sock, 1, proto, 'foo:%s|g' % result) for num, result in tests: _check(num, result) @mock.patch.object(random, 'random', lambda: -1) def test_gauge_delta_udp(): """StatsClient.gauge works with delta values.""" cl = _udp_client() _test_gauge_delta(cl, 'udp') @mock.patch.object(random, 'random', lambda: -1) def test_gauge_delta_tcp(): """TCPStatsClient.gauge works with delta values.""" cl = _tcp_client() _test_gauge_delta(cl, 'tcp') def _test_gauge_absolute_negative(cl, proto): cl.gauge('foo', -5, delta=False) 
_sock_check(cl._sock, 1, 'foo:0|g\nfoo:-5|g') @mock.patch.object(random, 'random', lambda: -1) def test_gauge_absolute_negative_udp(): """StatsClient.gauge works with absolute negative value.""" cl = _udp_client() _test_gauge_delta(cl, 'udp') @mock.patch.object(random, 'random', lambda: -1) def test_gauge_absolute_negative_tcp(): """TCPStatsClient.gauge works with absolute negative value.""" cl = _tcp_client() _test_gauge_delta(cl, 'tcp') def _test_gauge_absolute_negative_rate(cl, proto, mock_random): mock_random.return_value = -1 cl.gauge('foo', -1, rate=0.5, delta=False) _sock_check(cl._sock, 1, proto, 'foo:0|g\nfoo:-1|g') mock_random.return_value = 2 cl.gauge('foo', -2, rate=0.5, delta=False) # Should not have changed. _sock_check(cl._sock, 1, proto, 'foo:0|g\nfoo:-1|g') @mock.patch.object(random, 'random') def test_gauge_absolute_negative_rate_udp(mock_random): """StatsClient.gauge works with absolute negative value and rate.""" cl = _udp_client() _test_gauge_absolute_negative_rate(cl, 'udp', mock_random) @mock.patch.object(random, 'random') def test_gauge_absolute_negative_rate_tcp(mock_random): """TCPStatsClient.gauge works with absolute negative value and rate.""" cl = _tcp_client() _test_gauge_absolute_negative_rate(cl, 'tcp', mock_random) def _test_set(cl, proto): cl.set('foo', 10) _sock_check(cl._sock, 1, proto, 'foo:10|s') cl.set('foo', 2.3) _sock_check(cl._sock, 2, proto, 'foo:2.3|s') cl.set('foo', 'bar') _sock_check(cl._sock, 3, proto, 'foo:bar|s') cl.set('foo', 2.3, 0.5) _sock_check(cl._sock, 4, proto, 'foo:2.3|s|@0.5') @mock.patch.object(random, 'random', lambda: -1) def test_set_udp(): """StatsClient.set works.""" cl = _udp_client() _test_set(cl, 'udp') @mock.patch.object(random, 'random', lambda: -1) def test_set_tcp(): """TCPStatsClient.set works.""" cl = _tcp_client() _test_set(cl, 'tcp') def _test_timing(cl, proto): cl.timing('foo', 100) _sock_check(cl._sock, 1, proto, 'foo:100.000000|ms') cl.timing('foo', 350) _sock_check(cl._sock, 2, proto, 
'foo:350.000000|ms') cl.timing('foo', 100, rate=0.5) _sock_check(cl._sock, 3, proto, 'foo:100.000000|ms|@0.5') @mock.patch.object(random, 'random', lambda: -1) def test_timing_udp(): """StatsClient.timing works.""" cl = _udp_client() _test_timing(cl, 'udp') @mock.patch.object(random, 'random', lambda: -1) def test_timing_tcp(): """TCPStatsClient.timing works.""" cl = _tcp_client() _test_timing(cl, 'tcp') def test_timing_supports_timedelta(): cl = _udp_client() proto = 'udp' cl.timing('foo', timedelta(seconds=1.5)) _sock_check(cl._sock, 1, proto, 'foo:1500.000000|ms') cl.timing('foo', timedelta(days=1.5)) _sock_check(cl._sock, 2, proto, 'foo:129600000.000000|ms') @mock.patch.object(random, 'random', lambda: -1) def test_timing_unix_socket(): """UnixSocketStatsClient.timing works.""" cl = _unix_socket_client() _test_timing(cl, 'unix') def _test_prepare(cl, proto): tests = ( ('foo:1|c', ('foo', '1|c', 1)), ('bar:50|ms|@0.5', ('bar', '50|ms', 0.5)), ('baz:23|g', ('baz', '23|g', 1)), ) def _check(o, s, v, r): with mock.patch.object(random, 'random', lambda: -1): eq_(o, cl._prepare(s, v, r)) for o, (s, v, r) in tests: _check(o, s, v, r) @mock.patch.object(random, 'random', lambda: -1) def test_prepare_udp(): """Test StatsClient._prepare method.""" cl = _udp_client() _test_prepare(cl, 'udp') @mock.patch.object(random, 'random', lambda: -1) def test_prepare_tcp(): """Test TCPStatsClient._prepare method.""" cl = _tcp_client() _test_prepare(cl, 'tcp') def _test_prefix(cl, proto): cl.incr('bar') _sock_check(cl._sock, 1, proto, 'foo.bar:1|c') @mock.patch.object(random, 'random', lambda: -1) def test_prefix_udp(): """StatsClient.incr works.""" cl = _udp_client(prefix='foo') _test_prefix(cl, 'udp') @mock.patch.object(random, 'random', lambda: -1) def test_prefix_tcp(): """TCPStatsClient.incr works.""" cl = _tcp_client(prefix='foo') _test_prefix(cl, 'tcp') @mock.patch.object(random, 'random', lambda: -1) def test_prefix_unix_socket(): """UnixSocketStatsClient.incr works.""" cl = 
_unix_socket_client(prefix='foo') _test_prefix(cl, 'unix') def _test_timer_manager(cl, proto): with cl.timer('foo'): pass _timer_check(cl._sock, 1, proto, 'foo', 'ms') def test_timer_manager_udp(): """StatsClient.timer can be used as manager.""" cl = _udp_client() _test_timer_manager(cl, 'udp') def test_timer_manager_tcp(): """TCPStatsClient.timer can be used as manager.""" cl = _tcp_client() _test_timer_manager(cl, 'tcp') def _test_timer_decorator(cl, proto): @cl.timer('foo') def foo(a, b): return [a, b] @cl.timer('bar') def bar(a, b): return [b, a] # make sure it works with more than one decorator, called multiple # times, and that parameters are handled correctly eq_([4, 2], foo(4, 2)) _timer_check(cl._sock, 1, proto, 'foo', 'ms') eq_([2, 4], bar(4, 2)) _timer_check(cl._sock, 2, proto, 'bar', 'ms') eq_([6, 5], bar(5, 6)) _timer_check(cl._sock, 3, proto, 'bar', 'ms') def test_timer_decorator_udp(): """StatsClient.timer is a thread-safe decorator (UDP).""" cl = _udp_client() _test_timer_decorator(cl, 'udp') def test_timer_decorator_tcp(): """StatsClient.timer is a thread-safe decorator (TCP).""" cl = _tcp_client() _test_timer_decorator(cl, 'tcp') def _test_timer_capture(cl, proto): with cl.timer('woo') as result: eq_(result.ms, None) assert isinstance(result.ms, float) def test_timer_capture_udp(): """You can capture the output of StatsClient.timer (UDP).""" cl = _udp_client() _test_timer_capture(cl, 'udp') def test_timer_capture_tcp(): """You can capture the output of StatsClient.timer (TCP).""" cl = _tcp_client() _test_timer_capture(cl, 'tcp') def _test_timer_context_rate(cl, proto): with cl.timer('foo', rate=0.5): pass _timer_check(cl._sock, 1, proto, 'foo', 'ms|@0.5') @mock.patch.object(random, 'random', lambda: -1) def test_timer_context_rate_udp(): """StatsClient.timer can be used as manager with rate.""" cl = _udp_client() _test_timer_context_rate(cl, 'udp') @mock.patch.object(random, 'random', lambda: -1) def test_timer_context_rate_tcp(): 
"""TCPStatsClient.timer can be used as manager with rate.""" cl = _tcp_client() _test_timer_context_rate(cl, 'tcp') def test_timer_decorator_partial_function(): """TCPStatsClient.timer can be used as decorator on a partial function.""" cl = _tcp_client() foo = functools.partial(lambda x: x * x, 2) func = cl.timer('foo')(foo) eq_(4, func()) _timer_check(cl._sock, 1, 'tcp', 'foo', 'ms|@0.1') def _test_timer_decorator_rate(cl, proto): @cl.timer('foo', rate=0.1) def foo(a, b): return [b, a] @cl.timer('bar', rate=0.2) def bar(a, b=2, c=3): return [c, b, a] eq_([2, 4], foo(4, 2)) _timer_check(cl._sock, 1, proto, 'foo', 'ms|@0.1') eq_([3, 2, 5], bar(5)) _timer_check(cl._sock, 2, proto, 'bar', 'ms|@0.2') @mock.patch.object(random, 'random', lambda: -1) def test_timer_decorator_rate_udp(): """StatsClient.timer can be used as decorator with rate.""" cl = _udp_client() _test_timer_decorator_rate(cl, 'udp') @mock.patch.object(random, 'random', lambda: -1) def test_timer_decorator_rate_tcp(): """TCPStatsClient.timer can be used as decorator with rate.""" cl = _tcp_client() _test_timer_decorator_rate(cl, 'tcp') def _test_timer_context_exceptions(cl, proto): with assert_raises(socket.timeout): with cl.timer('foo'): raise socket.timeout() _timer_check(cl._sock, 1, proto, 'foo', 'ms') def test_timer_context_exceptions_udp(): cl = _udp_client() _test_timer_context_exceptions(cl, 'udp') def test_timer_context_exceptions_tcp(): cl = _tcp_client() _test_timer_context_exceptions(cl, 'tcp') def _test_timer_decorator_exceptions(cl, proto): @cl.timer('foo') def foo(): raise ValueError() with assert_raises(ValueError): foo() _timer_check(cl._sock, 1, proto, 'foo', 'ms') def test_timer_decorator_exceptions_udp(): cl = _udp_client() _test_timer_decorator_exceptions(cl, 'udp') def test_timer_decorator_exceptions_tcp(): cl = _tcp_client() _test_timer_decorator_exceptions(cl, 'tcp') def _test_timer_object(cl, proto): t = cl.timer('foo').start() t.stop() _timer_check(cl._sock, 1, proto, 'foo', 
'ms') def test_timer_object_udp(): """StatsClient.timer works.""" cl = _udp_client() _test_timer_object(cl, 'udp') def test_timer_object_tcp(): """TCPStatsClient.timer works.""" cl = _tcp_client() _test_timer_object(cl, 'tcp') def _test_timer_object_no_send(cl, proto): t = cl.timer('foo').start() t.stop(send=False) _sock_check(cl._sock, 0, proto) t.send() _timer_check(cl._sock, 1, proto, 'foo', 'ms') def test_timer_object_no_send_udp(): """Stop StatsClient.timer without sending.""" cl = _udp_client() _test_timer_object_no_send(cl, 'udp') def test_timer_object_no_send_tcp(): """Stop TCPStatsClient.timer without sending.""" cl = _tcp_client() _test_timer_object_no_send(cl, 'tcp') def _test_timer_object_rate(cl, proto): t = cl.timer('foo', rate=0.5) t.start() t.stop() _timer_check(cl._sock, 1, proto, 'foo', 'ms@0.5') @mock.patch.object(random, 'random', lambda: -1) def test_timer_object_rate_udp(): """StatsClient.timer works with rate.""" cl = _udp_client() _test_timer_object_rate(cl, 'udp') @mock.patch.object(random, 'random', lambda: -1) def test_timer_object_rate_tcp(): """TCPStatsClient.timer works with rate.""" cl = _tcp_client() _test_timer_object_rate(cl, 'tcp') def _test_timer_object_no_send_twice(cl): t = cl.timer('foo').start() t.stop() with assert_raises(RuntimeError): t.send() def test_timer_object_no_send_twice_udp(): """StatsClient.timer raises RuntimeError if send is called twice.""" cl = _udp_client() _test_timer_object_no_send_twice(cl) def test_timer_object_no_send_twice_tcp(): """TCPStatsClient.timer raises RuntimeError if send is called twice.""" cl = _tcp_client() _test_timer_object_no_send_twice(cl) def _test_timer_send_without_stop(cl): with cl.timer('foo') as t: assert t.ms is None with assert_raises(RuntimeError): t.send() t = cl.timer('bar').start() assert t.ms is None with assert_raises(RuntimeError): t.send() def test_timer_send_without_stop_udp(): """StatsClient.timer raises error if send is called before stop.""" cl = _udp_client() 
_test_timer_send_without_stop(cl) def test_timer_send_without_stop_tcp(): """TCPStatsClient.timer raises error if send is called before stop.""" cl = _tcp_client() _test_timer_send_without_stop(cl) def _test_timer_object_stop_without_start(cl): with assert_raises(RuntimeError): cl.timer('foo').stop() def test_timer_object_stop_without_start_udp(): """StatsClient.timer raises error if stop is called before start.""" cl = _udp_client() _test_timer_object_stop_without_start(cl) def test_timer_object_stop_without_start_tcp(): """TCPStatsClient.timer raises error if stop is called before start.""" cl = _tcp_client() _test_timer_object_stop_without_start(cl) def _test_pipeline(cl, proto): pipe = cl.pipeline() pipe.incr('foo') pipe.decr('bar') pipe.timing('baz', 320) pipe.send() _sock_check(cl._sock, 1, proto, 'foo:1|c\nbar:-1|c\nbaz:320.000000|ms') def test_pipeline_udp(): """StatsClient.pipeline works.""" cl = _udp_client() _test_pipeline(cl, 'udp') def test_pipeline_tcp(): """TCPStatsClient.pipeline works.""" cl = _tcp_client() _test_pipeline(cl, 'tcp') def _test_pipeline_null(cl, proto): pipe = cl.pipeline() pipe.send() _sock_check(cl._sock, 0, proto) def test_pipeline_null_udp(): """Ensure we don't error on an empty pipeline (UDP).""" cl = _udp_client() _test_pipeline_null(cl, 'udp') def test_pipeline_null_tcp(): """Ensure we don't error on an empty pipeline (TCP).""" cl = _tcp_client() _test_pipeline_null(cl, 'tcp') def _test_pipeline_manager(cl, proto): with cl.pipeline() as pipe: pipe.incr('foo') pipe.decr('bar') pipe.gauge('baz', 15) _sock_check(cl._sock, 1, proto, 'foo:1|c\nbar:-1|c\nbaz:15|g') def test_pipeline_manager_udp(): """StatsClient.pipeline can be used as manager.""" cl = _udp_client() _test_pipeline_manager(cl, 'udp') def test_pipeline_manager_tcp(): """TCPStatsClient.pipeline can be used as manager.""" cl = _tcp_client() _test_pipeline_manager(cl, 'tcp') def _test_pipeline_timer_manager(cl, proto): with cl.pipeline() as pipe: with pipe.timer('foo'): 
pass _timer_check(cl._sock, 1, proto, 'foo', 'ms') def test_pipeline_timer_manager_udp(): """Timer manager can be retrieve from UDP Pipeline manager.""" cl = _udp_client() _test_pipeline_timer_manager(cl, 'udp') def test_pipeline_timer_manager_tcp(): """Timer manager can be retrieve from TCP Pipeline manager.""" cl = _tcp_client() _test_pipeline_timer_manager(cl, 'tcp') def _test_pipeline_timer_decorator(cl, proto): with cl.pipeline() as pipe: @pipe.timer('foo') def foo(): pass foo() _timer_check(cl._sock, 1, proto, 'foo', 'ms') def test_pipeline_timer_decorator_udp(): """UDP Pipeline manager can be used as decorator.""" cl = _udp_client() _test_pipeline_timer_decorator(cl, 'udp') def test_pipeline_timer_decorator_tcp(): """TCP Pipeline manager can be used as decorator.""" cl = _tcp_client() _test_pipeline_timer_decorator(cl, 'tcp') def _test_pipeline_timer_object(cl, proto): with cl.pipeline() as pipe: t = pipe.timer('foo').start() t.stop() _sock_check(cl._sock, 0, proto) _timer_check(cl._sock, 1, proto, 'foo', 'ms') def test_pipeline_timer_object_udp(): """Timer from UDP Pipeline manager works.""" cl = _udp_client() _test_pipeline_timer_object(cl, 'udp') def test_pipeline_timer_object_tcp(): """Timer from TCP Pipeline manager works.""" cl = _tcp_client() _test_pipeline_timer_object(cl, 'tcp') def _test_pipeline_empty(cl): with cl.pipeline() as pipe: pipe.incr('foo') eq_(1, len(pipe._stats)) eq_(0, len(pipe._stats)) def test_pipeline_empty_udp(): """Pipelines should be empty after a send() call (UDP).""" cl = _udp_client() _test_pipeline_empty(cl) def test_pipeline_empty_tcp(): """Pipelines should be empty after a send() call (TCP).""" cl = _tcp_client() _test_pipeline_empty(cl) def _test_pipeline_negative_absolute_gauge(cl, proto): with cl.pipeline() as pipe: pipe.gauge('foo', -10, delta=False) pipe.incr('bar') _sock_check(cl._sock, 1, proto, 'foo:0|g\nfoo:-10|g\nbar:1|c') def test_pipeline_negative_absolute_gauge_udp(): """Negative absolute gauges use an 
internal pipeline (UDP).""" cl = _udp_client() _test_pipeline_negative_absolute_gauge(cl, 'udp') def test_pipeline_negative_absolute_gauge_tcp(): """Negative absolute gauges use an internal pipeline (TCP).""" cl = _tcp_client() _test_pipeline_negative_absolute_gauge(cl, 'tcp') def _test_big_numbers(cl, proto): num = 1234568901234 tests = ( # Explicitly create strings so we avoid the bug we're trying to test. ('gauge', 'foo:1234568901234|g'), ('incr', 'foo:1234568901234|c'), ('timing', 'foo:1234568901234.000000|ms'), ) def _check(method, result): cl._sock.reset_mock() getattr(cl, method)('foo', num) _sock_check(cl._sock, 1, proto, result) for method, result in tests: _check(method, result) def test_big_numbers_udp(): """Test big numbers with UDP client.""" cl = _udp_client() _test_big_numbers(cl, 'udp') def test_big_numbers_tcp(): """Test big numbers with TCP client.""" cl = _tcp_client() _test_big_numbers(cl, 'tcp') def _test_rate_no_send(cl, proto): cl.incr('foo', rate=0.5) _sock_check(cl._sock, 0, proto) @mock.patch.object(random, 'random', lambda: 2) def test_rate_no_send_udp(): """Rate below random value prevents sending with StatsClient.incr.""" cl = _udp_client() _test_rate_no_send(cl, 'udp') @mock.patch.object(random, 'random', lambda: 2) def test_rate_no_send_tcp(): """Rate below random value prevents sending with TCPStatsClient.incr.""" cl = _tcp_client() _test_rate_no_send(cl, 'tcp') def test_socket_error(): """Socket error on StatsClient should be ignored.""" cl = _udp_client() cl._sock.sendto.side_effect = socket.timeout() cl.incr('foo') _sock_check(cl._sock, 1, 'udp', 'foo:1|c') def test_pipeline_packet_size(): """Pipelines shouldn't send packets larger than 512 bytes (UDP only).""" sc = _udp_client() pipe = sc.pipeline() for x in range(32): # 32 * 16 = 512, so this will need 2 packets. 
pipe.incr('sixteen_char_str') pipe.send() eq_(2, sc._sock.sendto.call_count) assert len(sc._sock.sendto.call_args_list[0][0][0]) <= 512 assert len(sc._sock.sendto.call_args_list[1][0][0]) <= 512 @mock.patch.object(socket, 'socket') def test_tcp_raises_exception_to_user(mock_socket): """Socket errors in TCPStatsClient should be raised to user.""" addr = ('127.0.0.1', 1234) cl = _tcp_client(addr=addr[0], port=addr[1]) cl.incr('foo') eq_(1, cl._sock.sendall.call_count) cl._sock.sendall.side_effect = socket.error with assert_raises(socket.error): cl.incr('foo') @mock.patch.object(socket, 'socket') def test_tcp_timeout(mock_socket): """Timeout on TCPStatsClient should be set on socket.""" test_timeout = 321 cl = TCPStatsClient(timeout=test_timeout) cl.incr('foo') cl._sock.settimeout.assert_called_once_with(test_timeout) @mock.patch.object(socket, 'socket') def test_unix_socket_timeout(mock_socket): """Timeout on UnixSocketStatsClient should be set on socket.""" test_timeout = 321 cl = UnixSocketStatsClient(UNIX_SOCKET, timeout=test_timeout) cl.incr('foo') cl._sock.settimeout.assert_called_once_with(test_timeout) pystatsd-3.3/tox.ini000066400000000000000000000002701333727011600145470ustar00rootroot00000000000000[tox] envlist = py27,pypy,py34,py35,py36 [testenv] deps= mock==1.0.1 nose==1.2.1 coverage==3.5.2 commands= nosetests statsd --with-coverage --cover-package=statsd []