pax_global_header00006660000000000000000000000064137654457650014540gustar00rootroot0000000000000052 comment=1f627998fde4d4a0d454c4a6b8b5221158037a91 smoke-zephyr-2.0.1/000077500000000000000000000000001376544576500141755ustar00rootroot00000000000000smoke-zephyr-2.0.1/.github/000077500000000000000000000000001376544576500155355ustar00rootroot00000000000000smoke-zephyr-2.0.1/.github/workflows/000077500000000000000000000000001376544576500175725ustar00rootroot00000000000000smoke-zephyr-2.0.1/.github/workflows/ci.yml000066400000000000000000000036231376544576500207140ustar00rootroot00000000000000name: Continuous Integration on: push: pull_request: jobs: unit-tests: name: Test on Python v${{ matrix.python-version }} runs-on: ubuntu-latest strategy: matrix: python-version: [3.4, 3.5, 3.6, 3.7, 3.8, 3.9] steps: - name: Checkout the repository uses: actions/checkout@v2 with: persist-credentials: false - name: Install build essentials run: sudo apt-get --yes install build-essential - name: Set up Python uses: actions/setup-python@v2 with: python-version: ${{ matrix.python-version }} - name: Install dependencies run: | pip install pip==19.0.3 pipenv==2018.11.26 pipenv install --dev - run: pipenv run tests - run: pipenv run tests-coverage publish-release: name: Publish the release if: github.event_name == 'push' && startsWith(github.ref, 'refs/tags') runs-on: ubuntu-latest steps: - name: Checkout the repository uses: actions/checkout@v2 with: persist-credentials: false - name: Install build essentials run: sudo apt-get --yes install build-essential - name: Set up Python uses: actions/setup-python@v2 - name: Install dependencies run: | pip install pip==19.0.3 pipenv==2018.11.26 pipenv install --dev - name: Create the distribution run: pipenv run python setup.py build sdist - name: Publish the distribution uses: pypa/gh-action-pypi-publish@master with: user: __token__ password: ${{ secrets.PYPI_PASSWORD }} - name: Create the release id: create_release uses: 
actions/create-release@v1 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} with: tag_name: ${{ github.ref }} release_name: Release ${{ github.ref }} smoke-zephyr-2.0.1/.gitignore000066400000000000000000000002551376544576500161670ustar00rootroot00000000000000*.conf *.geany *.py[cod] .buildinfo .coverage .github/workflows/*.jnj .doctrees/* .idea/* build/* dist/* docs/build docs/html smoke_zephyr.egg-info/* MANIFEST Pipfile.lock smoke-zephyr-2.0.1/.pylintrc000066400000000000000000000050611376544576500160440ustar00rootroot00000000000000[MASTER] persistent=no jobs=1 unsafe-load-any-extension=no [MESSAGES CONTROL] confidence=UNDEFINED #enable=C0325,C0326,W0611,W0612,W0613 #disable=all disable= C0111,C0301,C0330,C0412, E1101,E1608,W1627,E1601,E1603,E1602,E1605,E1604,E1607,E1606, F0401, I0011,I0020,I0021, R0201, W0622,W0703,W0704,W1202,W1601,W1602,W1603,W1604,W1605,W1606,W1607,W1608,W1609,W1610,W1611,W1612,W1613,W1614,W1615,W1616,W1617,W1618,W1619,W1620,W1621,W1622,W1623,W1624,W1625,W1626,W1628,W1629,W1630,W1631,W1632,W1633 [REPORTS]. 
output-format=colorized files-output=no reports=yes msg-template="{path}:{line}: [{msg_id}({symbol}), {obj}] {msg}" [LOGGING] logging-modules=logging [BASIC] bad-functions=input good-names=i,j,k,ex,Run,_ bad-names=foo,bar,baz,toto,tutu,tata function-rgx=[a-z_][a-z0-9_]{2,30}$ function-name-hint=[a-z_][a-z0-9_]{2,30}$ variable-rgx=[a-z_][a-z0-9_]{2,30}$ variable-name-hint=[a-z_][a-z0-9_]{2,30}$ const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$ attr-rgx=[a-z_][a-z0-9_]{2,30}$ attr-name-hint=[a-z_][a-z0-9_]{2,30}$ argument-rgx=[a-z_][a-z0-9_]{2,30}$ argument-name-hint=[a-z_][a-z0-9_]{2,30}$ class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$ class-rgx=[A-Z_][a-zA-Z0-9]+$ class-name-hint=[A-Z_][a-zA-Z0-9]+$ module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ module-name-hint=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ method-rgx=(([a-z_][a-z0-9_]{2,40})|(assert[a-zA-Z0-9]{2,40})|(do_[a-zA-Z0-9_]{2,40}))$ method-name-hint=(([a-z_][a-z0-9_]{2,40})|(assert[a-zA-Z0-9]{2,40})|(do_[a-zA-Z0-9_]{2,40}))$ [FORMAT] ignore-long-lines=^\s*(# )??$ single-line-if-stmt=no no-space-check=trailing-comma,dict-separator indent-string=\t indent-after-paren=4 expected-line-ending-format=LF [VARIABLES] init-import=no callbacks=cb_,_cb,signal_ [SIMILARITIES] min-similarity-lines=4 ignore-comments=yes ignore-docstrings=yes ignore-imports=no [TYPECHECK] ignore-mixin-members=yes generated-members=REQUEST,acl_users,aq_parent [MISCELLANEOUS] notes=FIXME,XXX,TODO [DESIGN] max-args=8 ignored-argument-names=_.* max-locals=15 max-returns=12 max-branches=20 max-statements=70 max-parents=7 max-attributes=12 min-public-methods=0 max-public-methods=30 [CLASSES] defining-attr-methods=__init__,__new__,setUp valid-classmethod-first-arg=cls valid-metaclass-classmethod-first-arg=mcs 
exclude-protected=_asdict,_fields,_replace,_source,_make [EXCEPTIONS] overgeneral-exceptions=Exception smoke-zephyr-2.0.1/LICENSE000066400000000000000000000026321376544576500152050ustar00rootroot00000000000000Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the project nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
smoke-zephyr-2.0.1/Pipfile000066400000000000000000000005301376544576500155060ustar00rootroot00000000000000[[source]] url = "https://pypi.org/simple" verify_ssl = true name = "pypi" [packages] [dev-packages] sphinx = "*" coverage = "*" [scripts] tests = 'sh -c "PYTHONPATH=$(pwd)/lib python -m unittest -v tests"' tests-coverage = 'sh -c "PYTHONPATH=$(pwd)/lib coverage run -m unittest -v tests && coverage report --include=\"*/smoke_zephyr/*\""' smoke-zephyr-2.0.1/README.rst000066400000000000000000000030641376544576500156670ustar00rootroot00000000000000smoke-zephyr ============ Python utility collection |Documentation Status| |Github Issues| |PyPi Release| **Version 2.0** -- From version 2.0 onwards, Python 2.7 is no longer supported. For Python 2.7 support, use the latest 1.x version (`v1.4.1`_). License ------- smoke-zephyr is released under the BSD 3-clause license, for more details see the `LICENSE`_ file. Supported Versions ------------------ The following version of Python are currently supported: - Python 3.4 - Python 3.5 - Python 3.6 - Python 3.7 Code Documentation ------------------ smoke-zephyr uses Sphinx for internal code documentation. This documentation can be generated from source with the command ``sphinx-build docs/source docs/html``. The latest documentation is kindly hosted on `ReadTheDocs`_ at `smoke-zephyr.readthedocs.io`_. .. _LICENSE: https://github.com/zeroSteiner/smoke-zephyr/blob/master/LICENSE .. _ReadTheDocs: https://readthedocs.org/ .. _smoke-zephyr.readthedocs.io: https://smoke-zephyr.readthedocs.io/en/latest/ .. _v1.4.1: https://github.com/zeroSteiner/smoke-zephyr/releases/tag/v1.4.1 .. |Documentation Status| image:: https://readthedocs.org/projects/smoke-zephyr/badge/?version=latest&style=flat-square :target: http://smoke-zephyr.readthedocs.io/en/latest .. |Github Issues| image:: http://img.shields.io/github/issues/zerosteiner/smoke-zephyr.svg?style=flat-square :target: https://github.com/zerosteiner/smoke-zephyr/issues .. 
|PyPi Release| image:: https://img.shields.io/pypi/v/smoke-zephyr.svg?style=flat-square :target: https://pypi.python.org/pypi/smoke-zephyrsmoke-zephyr-2.0.1/docs/000077500000000000000000000000001376544576500151255ustar00rootroot00000000000000smoke-zephyr-2.0.1/docs/source/000077500000000000000000000000001376544576500164255ustar00rootroot00000000000000smoke-zephyr-2.0.1/docs/source/conf.py000066400000000000000000000232021376544576500177230ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # smoke-zephyr documentation build configuration file, created by # sphinx-quickstart on Wed May 21 09:18:54 2014. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys import os _prj_root = os.path.dirname(__file__) _prj_root = os.path.relpath(os.path.join('..', '..'), _prj_root) _prj_root = os.path.abspath(_prj_root) sys.path.insert(1, _prj_root) del _prj_root import smoke_zephyr GITHUB_BRANCH = 'master' GITHUB_REPO = 'zeroSteiner/smoke-zephyr' # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '1.6' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.intersphinx', 'sphinx.ext.linkcode'] intersphinx_mapping = { 'python': ('https://docs.python.org/3/', None), } def linkcode_resolve(domain, info): if domain != 'py': return None if not info['module']: return None file_name = info['module'].replace('.', '/') + '.py' return "https://github.com/{0}/blob/{1}/{2}".format(GITHUB_REPO, GITHUB_BRANCH, file_name) # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'smoke-zephyr' copyright = u'2014-2018, Spencer McIntyre' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = smoke_zephyr.version.split('-')[0] # The full version, including alpha/beta/rc tags. release = smoke_zephyr.distutils_version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). 
add_module_names = False # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. if os.environ.get('READTHEDOCS', None) != 'True': html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. 
#html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'smoke-zephyr-doc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'smoke-zephyr.tex', u'smoke-zephyr Documentation', u'Spencer McIntyre', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. 
#latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'smoke-zephyr', u'smoke-zephyr Documentation', [u'Spencer McIntyre'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ------------------------------------------------ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'smoke-zephyr', u'smoke-zephyr Documentation', u'Spencer McIntyre', 'smoke-zephyr', 'Python utility collection', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. #texinfo_show_urls = 'footnote' # -- Options for Epub output --------------------------------------------------- # Bibliographic Dublin Core info. epub_title = u'smoke-zephyr' epub_author = u'Spencer McIntyre' epub_publisher = u'Spencer McIntyre' epub_copyright = u'2014, Spencer McIntyre' # The language of the text. It defaults to the language option # or en if the language is not set. #epub_language = '' # The scheme of the identifier. Typical schemes are ISBN or URL. #epub_scheme = '' # The unique identifier of the text. This can be a ISBN number # or the project homepage. #epub_identifier = '' # A unique identification for the text. 
#epub_uid = '' # A tuple containing the cover image and cover page html template filenames. #epub_cover = () # HTML files that should be inserted before the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_pre_files = [] # HTML files shat should be inserted after the pages created by sphinx. # The format is a list of tuples containing the path and title. #epub_post_files = [] # A list of files that should not be packed into the epub file. #epub_exclude_files = [] # The depth of the table of contents in toc.ncx. #epub_tocdepth = 3 # Allow duplicate toc entries. #epub_tocdup = True smoke-zephyr-2.0.1/docs/source/index.rst000066400000000000000000000003071376544576500202660ustar00rootroot00000000000000The smoke_zephyr Package ======================== .. toctree:: :maxdepth: 2 smoke_zephyr/index.rst Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` smoke-zephyr-2.0.1/docs/source/smoke_zephyr/000077500000000000000000000000001376544576500211445ustar00rootroot00000000000000smoke-zephyr-2.0.1/docs/source/smoke_zephyr/argparse_types.rst000066400000000000000000000015251376544576500247310ustar00rootroot00000000000000:mod:`argparse_types` --- Extra argparse types ============================================== .. module:: smoke_zephyr.argparse_types :synopsis: Extra argparse types Functions --------- .. autofunction:: smoke_zephyr.argparse_types.bin_b64_type .. autofunction:: smoke_zephyr.argparse_types.bin_hex_type .. autofunction:: smoke_zephyr.argparse_types.dir_type .. autofunction:: smoke_zephyr.argparse_types.email_type .. autofunction:: smoke_zephyr.argparse_types.log_level_type .. autofunction:: smoke_zephyr.argparse_types.port_type .. autofunction:: smoke_zephyr.argparse_types.timespan_type Classes ------- .. autoclass:: smoke_zephyr.argparse_types.IntRange :members: :special-members: __init__ :undoc-members: .. 
autoclass:: smoke_zephyr.argparse_types.RegexType :members: :special-members: __init__ :undoc-members: smoke-zephyr-2.0.1/docs/source/smoke_zephyr/configuration.rst000066400000000000000000000011761376544576500245520ustar00rootroot00000000000000:mod:`configuration` --- General configuration manager ====================================================== .. module:: smoke_zephyr.configuration :synopsis: General configuration manager Data ---- .. autodata:: smoke_zephyr.configuration.SERIALIZER_DRIVERS :annotation: .. autodata:: smoke_zephyr.configuration.has_yaml Classes ------- .. autoclass:: smoke_zephyr.configuration.Configuration :show-inheritance: :members: :special-members: __init__ :undoc-members: .. autoclass:: smoke_zephyr.configuration.MemoryConfiguration :show-inheritance: :members: :special-members: __init__ :undoc-members: smoke-zephyr-2.0.1/docs/source/smoke_zephyr/index.rst000066400000000000000000000010001376544576500227740ustar00rootroot00000000000000:mod:`smoke_zephyr` --- Miscellaneous Python classes and functions ================================================================== .. module:: smoke_zephyr :synopsis: Miscellaneous Python classes and functions Data ---- .. autodata:: smoke_zephyr.distutils_version .. autodata:: smoke_zephyr.version .. autodata:: smoke_zephyr.version_info .. autodata:: smoke_zephyr.version_label Modules ------- .. toctree:: :maxdepth: 2 argparse_types.rst configuration.rst job.rst utilities.rst smoke-zephyr-2.0.1/docs/source/smoke_zephyr/job.rst000066400000000000000000000014031376544576500224460ustar00rootroot00000000000000:mod:`job` --- Asynchronous job manager ======================================= .. module:: smoke_zephyr.job :synopsis: Asynchronous job manager The :py:class:`.JobManager` provides a way to schedule jobs and run tasks asynchronously from within python on the local system. In this case jobs are callback functions defined by the user. .. 
warning:: The timing and scheduling functions within this module are not designed to be precise to the second. Functions --------- .. autofunction:: smoke_zephyr.job.normalize_job_id Classes ------- .. autoclass:: smoke_zephyr.job.JobManager :members: :special-members: __init__ :undoc-members: .. autoclass:: smoke_zephyr.job.JobRequestDelete :members: :special-members: __init__ :undoc-members: smoke-zephyr-2.0.1/docs/source/smoke_zephyr/utilities.rst000066400000000000000000000026371376544576500237210ustar00rootroot00000000000000:mod:`utilities` --- Miscellaneous Python classes and functions =============================================================== .. module:: smoke_zephyr.utilities :synopsis: Miscellaneous Python classes and functions Functions --------- .. autofunction:: configure_stream_logger .. autofunction:: download .. autofunction:: escape_single_quote .. autofunction:: format_bytes_size .. autofunction:: get_ip_list .. autofunction:: grep .. autofunction:: open_uri .. autofunction:: parse_case_camel_to_snake .. autofunction:: parse_case_snake_to_camel .. autofunction:: parse_server .. autofunction:: parse_timespan .. autofunction:: random_string_alphanumeric .. autofunction:: selection_collision .. autofunction:: sort_ipv4_list .. autofunction:: unescape_single_quote .. autofunction:: unique .. autofunction:: weighted_choice .. autofunction:: which .. autofunction:: xfrange Classes ------- .. autoclass:: AttributeDict :members: :special-members: __init__ :undoc-members: .. autoclass:: BruteforceGenerator :members: :special-members: __init__ .. autoclass:: Cache :members: :special-members: __init__ :undoc-members: .. autoclass:: FileWalker :members: :special-members: __init__ :undoc-members: .. autoclass:: SectionConfigParser :members: :special-members: __init__ :undoc-members: .. 
autoclass:: TestCase :members: :show-inheritance: :undoc-members: smoke-zephyr-2.0.1/setup.py000066400000000000000000000065601376544576500157160ustar00rootroot00000000000000#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # setup.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # import os import re import sys base_directory = os.path.dirname(__file__) try: from setuptools import setup, find_packages except ImportError: print('This project needs setuptools in order to build. 
Install it using your package') print('manager (usually python-setuptools) or via pip (pip install setuptools).') sys.exit(1) try: with open(os.path.join(base_directory, 'README.rst')) as file_h: long_description = file_h.read() except OSError: sys.stderr.write('README.rst is unavailable, can not generate the long description\n') long_description = None with open(os.path.join(base_directory, 'smoke_zephyr', '__init__.py')) as file_h: match = re.search( r'^version_info\s*=\s*(?:\w*\.)?namedtuple\(\'\w+\',\s*\[\'major\',\s*\'minor\',\s*\'micro\'\]\)\((\d+),\s*(\d+),\s*(\d+)\)$', file_h.read(), flags=re.MULTILINE ) if match is None: raise RuntimeError('Unable to find the version information') version = '.'.join(map(str, match.groups())) DESCRIPTION = """\ This project provides a collection of miscellaneous Python utilities.\ """ setup( name='smoke-zephyr', version=version, author='Spencer McIntyre', author_email='zeroSteiner@gmail.com', maintainer='Spencer McIntyre', maintainer_email='zeroSteiner@gmail.com', description=DESCRIPTION, long_description=long_description, url='https://github.com/zeroSteiner/smoke-zephyr', license='BSD', packages=['smoke_zephyr'], classifiers=[ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'License :: OSI Approved :: BSD License', 'Operating System :: POSIX', 'Programming Language :: Python', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', ] ) smoke-zephyr-2.0.1/smoke_zephyr/000077500000000000000000000000001376544576500167145ustar00rootroot00000000000000smoke-zephyr-2.0.1/smoke_zephyr/__init__.py000066400000000000000000000046341376544576500210340ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # smoke_zephyr/__init__.py # # Redistribution and use in source and binary forms, with or 
without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# import collections # Semantic Versioning: http://semver.org/spec/v2.0.0.html version_info = collections.namedtuple('version_info', ['major', 'minor', 'micro'])(2, 0, 1) """A tuple representing the version information in the format ('major', 'minor', 'micro')""" version_label = '' """A version lable such as alpha or beta.""" version = "{0}.{1}.{2}".format(version_info.major, version_info.minor, version_info.micro) """A string representing the full version information.""" # distutils_version is compatible with distutils.version classes distutils_version = version """A string sutiable for being parsed by :py:mod:`distutils.version` classes.""" if version_label: version += '-' + version_label distutils_version += version_label[0] if version_label[-1].isdigit(): distutils_version += version_label[-1] else: distutils_version += '0' __version__ = distutils_version smoke-zephyr-2.0.1/smoke_zephyr/argparse_types.py000066400000000000000000000115641376544576500223250ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # smoke_zephyr/argparse_types.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. 
# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import argparse import ast import base64 import binascii import logging import os import re from .utilities import is_valid_email_address from .utilities import parse_timespan class RegexType(object): """An argparse type representing an arbitrary string which matches the specified regex.""" def __init__(self, regex, error_message=None): self.regex = regex self.error_message = (error_message or "{arg} is invalid") def __call__(self, arg): if hasattr(self.regex, 'match'): result = self.regex.match(arg) else: result = re.match(self.regex, arg) if not result: raise argparse.ArgumentTypeError(self.error_message.format(arg=repr(arg))) return arg class IntRange(object): """An argparse type representing an integer which must fall in a specified range.""" def __init__(self, stop, start=None): self.start = (0 if start is None else start) self.stop = (stop if start is None else start) - 1 def __call__(self, arg): try: arg = ast.literal_eval(arg) except ValueError: raise argparse.ArgumentTypeError("{arg} is invalid".format(arg=repr(arg))) if not isinstance(arg, int): raise argparse.ArgumentTypeError("{arg} is invalid".format(arg=repr(arg))) if arg < self.start: raise argparse.ArgumentTypeError("{arg} is invalid 
def port_type(arg):
	"""
	An argparse type representing a tcp or udp port number.

	:param str arg: The command line argument as provided by argparse.
	:return: The port number as an integer.
	:rtype: int
	:raises argparse.ArgumentTypeError: If *arg* is not a valid port number.
	"""
	error_msg = "{0} is not a valid port".format(repr(arg))
	try:
		value = ast.literal_eval(arg)
	# fix: literal_eval can also raise SyntaxError (e.g. for '1,2,'), which
	# previously escaped as an unhandled exception
	except (ValueError, SyntaxError):
		raise argparse.ArgumentTypeError(error_msg)
	# fix: reject non-integer literals; previously a float such as '1.5' was
	# accepted as a port and a list literal raised an unhandled TypeError
	# during the range comparison
	if not isinstance(value, int) or isinstance(value, bool):
		raise argparse.ArgumentTypeError(error_msg)
	if value < 0 or value > 65535:
		raise argparse.ArgumentTypeError(error_msg)
	return value
not a valid time span".format(repr(arg))) return arg smoke-zephyr-2.0.1/smoke_zephyr/configuration.py000066400000000000000000000204301376544576500221340ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # smoke_zephyr/configuration.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
try:
	import yaml
except ImportError:
	has_yaml = False
	"""Whether the :py:mod:`yaml` module is available or not."""
else:
	has_yaml = True
	try:
		# prefer the C-accelerated loader and dumper when libyaml is available
		from yaml import CLoader as Loader, CDumper as Dumper
	except ImportError:
		from yaml import Loader, Dumper

SERIALIZER_DRIVERS = {}
"""The serializer drivers that are available."""
SERIALIZER_DRIVERS['json'] = {'load': json.load, 'dumps': lambda obj: json.dumps(obj, sort_keys=True, indent=4)}
SERIALIZER_DRIVERS['jsn'] = {'load': json.load, 'dumps': lambda obj: json.dumps(obj, sort_keys=True, indent=4)}
if has_yaml:
	# fix: PyYAML's string serializer is yaml.dump, not yaml.dumps; the
	# original raised AttributeError whenever a yaml configuration was saved
	SERIALIZER_DRIVERS['yaml'] = {'load': lambda file_obj: yaml.load(file_obj, Loader=Loader), 'dumps': lambda obj: yaml.dump(obj, default_flow_style=False, Dumper=Dumper)}
	SERIALIZER_DRIVERS['yml'] = {'load': lambda file_obj: yaml.load(file_obj, Loader=Loader), 'dumps': lambda obj: yaml.dump(obj, default_flow_style=False, Dumper=Dumper)}
""" if self.prefix: item_name = self.prefix + self.seperator + item_name item_names = item_name.split(self.seperator) node = self._storage for item_name in item_names: node = node[item_name] return node def get_if_exists(self, item_name, default_value=None): """ Retrieve the value of an option if it exists, otherwise return *default_value* instead of raising an error: :param str item_name: The name of the option to retrieve. :param default_value: The value to return if *item_name* does not exist. :return: The value of *item_name* in the configuration. """ if self.has_option(item_name): return self.get(item_name) return default_value def get_storage(self): """ Get a copy of the internal configuration. Changes made to the returned copy will not affect this object. :return: A copy of the internal storage object. :rtype: dict """ return copy.deepcopy(self._storage) def has_option(self, option_name): """ Check that an option exists. :param str option_name: The name of the option to check. :return: True of the option exists in the configuration. :rtype: bool """ if self.prefix: option_name = self.prefix + self.seperator + option_name item_names = option_name.split(self.seperator) node = self._storage for item_name in item_names: if node is None: return False if not item_name in node: return False node = node[item_name] return True def has_section(self, section_name): """ Checks that an option exists and that it contains sub options. :param str section_name: The name of the section to check. :return: True if the section exists. :rtype: dict """ if not self.has_option(section_name): return False return isinstance(self.get(section_name), dict) def set(self, item_name, item_value): """ Sets the value of an option in the configuration. :param str item_name: The name of the option to set. :param item_value: The value of the option to set. 
""" if self.prefix: item_name = self.prefix + self.seperator + item_name item_names = item_name.split(self.seperator) item_last = item_names.pop() node = self._storage for item_name in item_names: if not item_name in node: node[item_name] = {} node = node[item_name] node[item_last] = item_value return class Configuration(MemoryConfiguration): """ This class provides a generic object for parsing configuration files in multiple formats. """ def __init__(self, configuration_file, prefix=''): """ :param str configuration_file: The configuration file to parse. :param str prefix: String to be prefixed to all option names. """ self.configuration_file = configuration_file with open(self.configuration_file, 'r') as file_h: mem_object = self._serializer('load', file_h) super(Configuration, self).__init__(mem_object, prefix) @property def configuration_file_ext(self): """ The extension of the current configuration file. """ return os.path.splitext(self.configuration_file)[1][1:] def _serializer(self, operation, *args): if not self.configuration_file_ext in SERIALIZER_DRIVERS: raise ValueError('unknown file type \'' + self.configuration_file_ext + '\'') function = SERIALIZER_DRIVERS[self.configuration_file_ext][operation] return function(*args) def get_missing(self, verify_file): """ Use a verification configuration which has a list of required options and their respective types. This information is used to identify missing and incompatible options in the loaded configuration. :param str verify_file: The file to load for verification data. :return: A dictionary of missing and incompatible settings. 
:rtype: dict """ vconf = Configuration(verify_file) missing = {} for setting, setting_type in vconf.get('settings').items(): if not self.has_option(setting): missing['missing'] = missing.get('settings', []) missing['missing'].append(setting) elif not type(self.get(setting)).__name__ == setting_type: missing['incompatible'] = missing.get('incompatible', []) missing['incompatible'].append((setting, setting_type)) return missing def save(self): """ Save the current configuration to disk. """ with open(self.configuration_file, 'w') as file_h: file_h.write(self._serializer('dumps', self._storage)) def main(): import argparse parser = argparse.ArgumentParser(description='Parse a configuration file', conflict_handler='resolve') parser.add_argument('config_file', action='store', help='configuration file to parse') parser.add_argument('option', action='store', help='option to retreive the value from') arguments = parser.parse_args() config = Configuration(arguments.config_file) if not config.has_option(arguments.option): return 1 option_value = config.get(arguments.option) if isinstance(option_value, list): for value in option_value: print(value) # pylint: disable=C0325 return 0 print(option_value) # pylint: disable=C0325 return 0 if __name__ == '__main__': sys.exit(main()) smoke-zephyr-2.0.1/smoke_zephyr/job.py000066400000000000000000000323461376544576500200500ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # smoke_zephyr/job.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. 
def normalize_job_id(job_id):
	"""
	Convert a value to a job id.

	:param job_id: Value to convert.
	:type job_id: int, str
	:return: The job id.
	:rtype: :py:class:`uuid.UUID`
	"""
	if isinstance(job_id, uuid.UUID):
		return job_id
	# fix: integers are documented as supported but previously failed when
	# passed as the positional (hex string) argument of the UUID constructor
	if isinstance(job_id, int):
		return uuid.UUID(int=job_id)
	return uuid.UUID(job_id)
class JobRun(threading.Thread):
	"""A thread which executes a job callback exactly once and records the outcome."""
	def __init__(self, callback, args):
		super(JobRun, self).__init__()
		self.daemon = False
		self.callback = callback        # the function to execute
		self.callback_args = args       # positional arguments for the callback
		self.request_delete = False     # set when the callback asks for its job to be deleted
		self.exception = None           # holds any exception raised by the callback
		self.reaped = False             # set by the manager once the result has been collected
	def run(self):
		try:
			outcome = self.callback(*self.callback_args)
		except Exception as error:
			self.exception = error
		else:
			if isinstance(outcome, JobRequestDelete):
				self.request_delete = True
""" self._thread = threading.Thread(target=self._run) self._thread.daemon = True self._jobs = {} self._thread_running = threading.Event() self._thread_shutdown = threading.Event() self._thread_shutdown.set() self._job_lock = threading.RLock() self.use_utc = use_utc self.logger = logging.getLogger(logger_name or self.__class__.__name__) self.exc_info = False def __len__(self): return self.job_count() def _job_execute(self, job_id): self._job_lock.acquire() job_desc = self._jobs[job_id] job_desc['last_run'] = self.now() job_desc['run_count'] += 1 self.logger.debug('executing job with id: ' + str(job_id) + ' and callback function: ' + job_desc['callback'].__name__) job_desc['job'] = JobRun(job_desc['callback'], job_desc['parameters']) job_desc['job'].start() self._job_lock.release() def _run(self): self.logger.info('the job manager has been started') self._thread_running.set() self._thread_shutdown.clear() self._job_lock.acquire() while self._thread_running.is_set(): self._job_lock.release() time.sleep(1) self._job_lock.acquire() if not self._thread_running.is_set(): break # reap jobs jobs_for_removal = set() for job_id, job_desc in self._jobs.items(): job_obj = job_desc['job'] if job_obj.is_alive() or job_obj.reaped: continue if job_obj.exception is not None: if job_desc['tolerate_exceptions']: self.logger.warning('job ' + str(job_id) + ' encountered exception: ' + job_obj.exception.__class__.__name__, exc_info=self.exc_info) else: self.logger.error('job ' + str(job_id) + ' encountered an error and is not set to tolerate exceptions', self.exc_info) jobs_for_removal.add(job_id) if isinstance(job_desc['expiration'], int): if job_desc['expiration'] <= 0: jobs_for_removal.add(job_id) else: job_desc['expiration'] -= 1 elif isinstance(job_desc['expiration'], datetime.datetime): if self.now_is_after(job_desc['expiration']): jobs_for_removal.add(job_id) if job_obj.request_delete: jobs_for_removal.add(job_id) job_obj.reaped = True for job_id in jobs_for_removal: 
self.job_delete(job_id) # sow jobs for job_id, job_desc in self._jobs.items(): if job_desc['last_run'] is not None and self.now_is_before(job_desc['last_run'] + job_desc['run_every']): continue if job_desc['job'].is_alive(): continue if not job_desc['job'].reaped: continue if not job_desc['enabled']: continue self._job_execute(job_id) self._job_lock.release() self._thread_shutdown.set() def now(self): """ Return a :py:class:`datetime.datetime` instance representing the current time. :rtype: :py:class:`datetime.datetime` """ if self.use_utc: return datetime.datetime.utcnow() else: return datetime.datetime.now() def now_is_after(self, dt): """ Check whether the datetime instance described in dt is after the current time. :param dt: Value to compare. :type dt: :py:class:`datetime.datetime` :rtype: bool """ return bool(dt <= self.now()) def now_is_before(self, dt): """ Check whether the datetime instance described in dt is before the current time. :param dt: Value to compare. :type dt: :py:class:`datetime.datetime` :rtype: bool """ return bool(dt >= self.now()) def start(self): """ Start the JobManager thread. """ if self._thread_running.is_set(): raise RuntimeError('the JobManager has already been started') self._thread.start() self._thread_running.wait() return def stop(self): """ Stop the JobManager thread. 
""" self.logger.debug('stopping the job manager') self._thread_running.clear() self._thread_shutdown.wait() self._job_lock.acquire() self.logger.debug('waiting on ' + str(len(self._jobs)) + ' job threads') for job_desc in self._jobs.values(): if job_desc['job'] is None: continue if not job_desc['job'].is_alive(): continue job_desc['job'].join() # the job lock must be released before the thread can be joined because the thread routine acquires it before # checking if it should exit, see https://github.com/zeroSteiner/smoke-zephyr/issues/4 for more details self._job_lock.release() self._thread.join() self.logger.info('the job manager has been stopped') return def job_run(self, callback, parameters=None): """ Add a job and run it once immediately. :param function callback: The function to run asynchronously. :param parameters: The parameters to be provided to the callback. :type parameters: list, tuple :return: The job id. :rtype: :py:class:`uuid.UUID` """ if not self._thread_running.is_set(): raise RuntimeError('the JobManager is not running') parameters = (parameters or ()) if not isinstance(parameters, (list, tuple)): parameters = (parameters,) job_desc = {} job_desc['job'] = JobRun(callback, parameters) job_desc['last_run'] = None job_desc['run_every'] = datetime.timedelta(0, 1) job_desc['callback'] = callback job_desc['parameters'] = parameters job_desc['enabled'] = True job_desc['tolerate_exceptions'] = False job_desc['run_count'] = 0 job_desc['expiration'] = 0 job_id = uuid.uuid4() self.logger.info('adding new job with id: ' + str(job_id) + ' and callback function: ' + callback.__name__) with self._job_lock: self._jobs[job_id] = job_desc self._job_execute(job_id) return job_id def job_add(self, callback, parameters=None, hours=0, minutes=0, seconds=0, tolerate_exceptions=True, expiration=None): """ Add a job to the job manager. :param function callback: The function to run asynchronously. :param parameters: The parameters to be provided to the callback. 
:type parameters: list, tuple :param int hours: Number of hours to sleep between running the callback. :param int minutes: Number of minutes to sleep between running the callback. :param int seconds: Number of seconds to sleep between running the callback. :param bool tolerate_execptions: Whether to continue running a job after it has thrown an exception. :param expiration: When to expire and remove the job. If an integer is provided, the job will be executed that many times. If a datetime or timedelta instance is provided, then the job will be removed after the specified time. :type expiration: int, :py:class:`datetime.timedelta`, :py:class:`datetime.datetime` :return: The job id. :rtype: :py:class:`uuid.UUID` """ if not self._thread_running.is_set(): raise RuntimeError('the JobManager is not running') parameters = (parameters or ()) if not isinstance(parameters, (list, tuple)): parameters = (parameters,) job_desc = {} job_desc['job'] = JobRun(callback, parameters) job_desc['last_run'] = None job_desc['run_every'] = datetime.timedelta(0, ((hours * 60 * 60) + (minutes * 60) + seconds)) job_desc['callback'] = callback job_desc['parameters'] = parameters job_desc['enabled'] = True job_desc['tolerate_exceptions'] = tolerate_exceptions job_desc['run_count'] = 0 if isinstance(expiration, int): job_desc['expiration'] = expiration elif isinstance(expiration, datetime.timedelta): job_desc['expiration'] = self.now() + expiration elif isinstance(expiration, datetime.datetime): job_desc['expiration'] = expiration else: job_desc['expiration'] = None job_id = uuid.uuid4() self.logger.info('adding new job with id: ' + str(job_id) + ' and callback function: ' + callback.__name__) with self._job_lock: self._jobs[job_id] = job_desc return job_id def job_count(self): """ Return the number of jobs. :return: The number of jobs. :rtype: int """ return len(self._jobs) def job_count_enabled(self): """ Return the number of enabled jobs. :return: The number of jobs that are enabled. 
:rtype: int """ enabled = 0 for job_desc in self._jobs.values(): if job_desc['enabled']: enabled += 1 return enabled def job_enable(self, job_id): """ Enable a job. :param job_id: Job identifier to enable. :type job_id: :py:class:`uuid.UUID` """ job_id = normalize_job_id(job_id) with self._job_lock: job_desc = self._jobs[job_id] job_desc['enabled'] = True def job_disable(self, job_id): """ Disable a job. Disabled jobs will not be executed. :param job_id: Job identifier to disable. :type job_id: :py:class:`uuid.UUID` """ job_id = normalize_job_id(job_id) with self._job_lock: job_desc = self._jobs[job_id] job_desc['enabled'] = False def job_delete(self, job_id, wait=True): """ Delete a job. :param job_id: Job identifier to delete. :type job_id: :py:class:`uuid.UUID` :param bool wait: If the job is currently running, wait for it to complete before deleting it. """ job_id = normalize_job_id(job_id) self.logger.info('deleting job with id: ' + str(job_id) + ' and callback function: ' + self._jobs[job_id]['callback'].__name__) job_desc = self._jobs[job_id] with self._job_lock: job_desc['enabled'] = False if wait and self.job_is_running(job_id): job_desc['job'].join() del self._jobs[job_id] def job_exists(self, job_id): """ Check if a job identifier exists. :param job_id: Job identifier to check. :type job_id: :py:class:`uuid.UUID` :rtype: bool """ job_id = normalize_job_id(job_id) return job_id in self._jobs def job_is_enabled(self, job_id): """ Check if a job is enabled. :param job_id: Job identifier to check the status of. :type job_id: :py:class:`uuid.UUID` :rtype: bool """ job_id = normalize_job_id(job_id) job_desc = self._jobs[job_id] return job_desc['enabled'] def job_is_running(self, job_id): """ Check if a job is currently running. False is returned if the job does not exist. :param job_id: Job identifier to check the status of. 
:type job_id: :py:class:`uuid.UUID` :rtype: bool """ job_id = normalize_job_id(job_id) if job_id not in self._jobs: return False job_desc = self._jobs[job_id] if job_desc['job']: return job_desc['job'].is_alive() return False smoke-zephyr-2.0.1/smoke_zephyr/requirements.py000066400000000000000000000075511376544576500220210ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # smoke_zephyr/requirements.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
def check_requirements(requirements, ignore=None):
	"""
	Parse requirements for package information to determine if all requirements
	are met. The *requirements* argument can be a string to a requirements file,
	a file like object to be read, or a list of strings representing the package
	requirements.

	:param requirements: The file to parse.
	:type requirements: file obj, list, str, tuple
	:param ignore: A sequence of packages to ignore.
	:type ignore: list, tuple
	:return: A list of missing or incompatible packages.
	:rtype: list
	"""
	ignored = ignore or []
	not_satisfied = []
	working_set = pkg_resources.working_set
	installed_packages = {pkg.project_name: pkg for pkg in working_set}  # pylint: disable=E1133
	# normalize the requirements argument into a list of requirement lines
	if isinstance(requirements, str):
		with open(requirements, 'r') as file_h:
			requirements = file_h.readlines()
	elif hasattr(requirements, 'readlines'):
		requirements = requirements.readlines()
	elif not isinstance(requirements, (list, tuple)):
		raise TypeError('invalid type for argument requirements')
	for req_line in requirements:
		req_line = req_line.strip()
		match = re.match(r'^([\w\-]+)(([<>=]=)(\d+(\.\d+)*))?$', req_line)
		if not match:
			raise ValueError("requirement '{0}' is in an invalid format".format(req_line))
		req_pkg = match.group(1)
		if req_pkg in ignored:
			continue
		if req_pkg not in installed_packages:
			# fall back to asking the working set directly before declaring the
			# package missing
			try:
				find_result = working_set.find(pkg_resources.Requirement.parse(req_line))
			except pkg_resources.ResolutionError:
				find_result = False
			if not find_result:
				not_satisfied.append(req_pkg)
			continue
		if not match.group(2):
			continue  # no version constraint to enforce
		req_version = distutils.version.StrictVersion(match.group(4))
		version_match = re.match(r'^((\d+\.)*\d+)', installed_packages[req_pkg].version)
		if not version_match:
			not_satisfied.append(req_pkg)
			continue
		installed_version = distutils.version.StrictVersion(version_match.group(0))
		comparison = match.group(3)
		if comparison == '==':
			satisfied = installed_version == req_version
		elif comparison == '>=':
			satisfied = installed_version >= req_version
		else:  # the regex only permits '==', '>=' or '<='
			satisfied = installed_version <= req_version
		if not satisfied:
			not_satisfied.append(req_pkg)
	return not_satisfied
class BruteforceGenerator(object):
	"""
	An iterator yielding every string within a range of lengths built from a
	character set, suitable for bruteforcing sequences.
	"""
	# requirments = itertools
	def __init__(self, startlen, endlen=None, charset=None):
		"""
		:param int startlen: The minimum sequence size to generate.
		:param int endlen: The maximum sequence size to generate.
		:param charset: The characters to include in the resulting sequences.
		"""
		self.startlen = startlen
		self.endlen = startlen if endlen is None else endlen
		# normalize the character set into a sorted tuple of single characters
		if charset is None:
			charset = [chr(value) for value in range(0, 256)]
		elif isinstance(charset, str):
			charset = list(charset)
		elif isinstance(charset, bytes):
			charset = [chr(byte) for byte in charset]
		charset.sort()
		self.charset = tuple(charset)
		self.length = self.startlen
		self._product = itertools.product(self.charset, repeat=self.length)
		self._next = self.__next__
	def __iter__(self):
		return self
	def __next__(self):
		return self.next()
	def next(self):
		try:
			combination = next(self._product)
		except StopIteration:
			# the current length is exhausted; advance to the next length or stop
			if self.length == self.endlen:
				raise StopIteration
			self.length += 1
			self._product = itertools.product(self.charset, repeat=self.length)
			combination = next(self._product)
		return ''.join(combination)
""" def __init__(self, timeout): """ :param timeout: The amount of time in seconds that a cached result will be considered valid for. :type timeout: int, str """ if isinstance(timeout, str): timeout = parse_timespan(timeout) self.cache_timeout = timeout self._target_function = None self._target_function_arg_spec = None self.__cache = {} self.__obj = None def __get__(self, instance, _): self.__obj = instance return self def __call__(self, *args, **kwargs): if not getattr(self, '_target_function', False): target_function = args[0] if not inspect.isfunction(target_function) and not inspect.ismethod(target_function): raise RuntimeError('the cached object must be a function or method') arg_spec = inspect.getfullargspec(target_function) # pylint: disable=W1505 arg_spec = _ArgSpec(args=arg_spec.args, varargs=arg_spec.varargs, keywords=arg_spec.kwonlyargs, defaults=arg_spec.defaults) if arg_spec.varargs or arg_spec.keywords: raise RuntimeError('the cached function can not use dynamic args or kwargs') self._target_function = target_function self._target_function_arg_spec = arg_spec return functools.wraps(target_function)(self) self.cache_clean() if self.__obj is not None: args = (self.__obj,) + args self.__obj = None is_method = True else: is_method = False args = self._flatten_args(args, kwargs) if is_method: inst = args.popleft() args = tuple(args) ref = weakref.ref(inst, functools.partial(self._ref_callback, args)) cache_args = (ref,) + args args = (inst,) + args else: cache_args = tuple(args) args = tuple(args) result, expiration = self.__cache.get(cache_args, (None, 0)) if expiration > time.time(): return result result = self._target_function(*args) self.__cache[cache_args] = (result, time.time() + self.cache_timeout) return result def __repr__(self): return "".format(self._target_function.__name__, id(self._target_function)) def _flatten_args(self, args, kwargs): flattened_args = collections.deque(args) arg_spec = self._target_function_arg_spec arg_spec_defaults = 
(arg_spec.defaults or []) default_args = tuple(arg_spec.args[:-len(arg_spec_defaults)]) default_kwargs = dict(zip(arg_spec.args[-len(arg_spec_defaults):], arg_spec_defaults)) for arg_id in range(len(args), len(arg_spec.args)): arg_name = arg_spec.args[arg_id] if arg_name in default_args: if not arg_name in kwargs: raise TypeError("{0}() missing required argument '{1}'".format(self._target_function.__name__, arg_name)) flattened_args.append(kwargs.pop(arg_name)) else: flattened_args.append(kwargs.pop(arg_name, default_kwargs[arg_name])) if kwargs: unexpected_kwargs = tuple("'{0}'".format(a) for a in kwargs.keys()) raise TypeError("{0}() got an unexpected keyword argument{1} {2}".format(self._target_function.__name__, ('' if len(unexpected_kwargs) == 1 else 's'), ', '.join(unexpected_kwargs))) return flattened_args def _ref_callback(self, args, ref): args = (ref,) + args self.__cache.pop(args, None) def cache_clean(self): """ Remove expired items from the cache. """ now = time.time() keys_for_removal = collections.deque() for key, (_, expiration) in self.__cache.items(): if expiration < now: keys_for_removal.append(key) for key in keys_for_removal: del self.__cache[key] def cache_clear(self): """ Remove all items from the cache. """ self.__cache = {} class FileWalker(object): """ This class is used to easily iterate over files and subdirectories of a specified parent directory. """ def __init__(self, filespath, absolute_path=False, skip_files=False, skip_dirs=False, filter_func=None, follow_links=False, max_depth=None): """ .. versionchanged:: 1.4.0 Added the *follow_links* and *max_depth* parameters. :param str filespath: A path to either a file or a directory. If a file is passed then that will be the only file returned during the iteration. If a directory is passed, all files and subdirectories will be recursively returned during the iteration. :param bool absolute_path: Whether or not the absolute path or a relative path should be returned. 
:param bool skip_files: Whether or not to skip files. :param bool skip_dirs: Whether or not to skip directories. :param function filter_func: If defined, the filter_func function will be called for each path (with the path as the one and only argument) and if the function returns false the path will be skipped. :param bool follow_links: Whether or not to follow directories pointed to by symlinks. :param max_depth: A maximum depth to recurse into. """ if not (os.path.isfile(filespath) or os.path.isdir(filespath)): raise Exception(filespath + ' is neither a file or directory') if absolute_path: self.filespath = os.path.abspath(filespath) else: self.filespath = os.path.relpath(filespath) self.skip_files = skip_files self.skip_dirs = skip_dirs self.filter_func = filter_func self.follow_links = follow_links self.max_depth = float('inf') if max_depth is None else max_depth if os.path.isdir(self.filespath): self._walk = None self._next = self._next_dir elif os.path.isfile(self.filespath): self._next = self._next_file def __iter__(self): return self._next() def _skip(self, cur_file): if self.skip_files and os.path.isfile(cur_file): return True if self.skip_dirs and os.path.isdir(cur_file): return True if self.filter_func is not None: if not self.filter_func(cur_file): return True return False def _next_dir(self): for root, dirs, files in os.walk(self.filespath, followlinks=self.follow_links): if root == self.filespath: depth = 0 else: depth = os.path.relpath(root, start=self.filespath).count(os.path.sep) + 1 if depth >= self.max_depth: continue for entry in itertools.chain(dirs, files): current_path = os.path.join(root, entry) if not self._skip(current_path): yield current_path if self.max_depth >= 0 and not self._skip(self.filespath): yield self.filespath def _next_file(self): if self.max_depth >= 0 and not self._skip(self.filespath): yield self.filespath class SectionConfigParser(object): """ Proxy access to a section of a ConfigParser object. 
""" __version__ = '0.2' def __init__(self, section_name, config_parser): """ :param str section_name: Name of the section to proxy access for. :param config_parser: ConfigParser object to proxy access for. :type config_parse: :py:class:`ConfigParser.ConfigParser` """ self.section_name = section_name self.config_parser = config_parser def _get_raw(self, option, opt_type, default=None): get_func = getattr(self.config_parser, 'get' + opt_type) if default is None: return get_func(self.section_name, option) elif self.config_parser.has_option(self.section_name, option): return get_func(self.section_name, option) else: return default def get(self, option, default=None): """ Retrieve *option* from the config, returning *default* if it is not present. :param str option: The name of the value to return. :param default: Default value to return if the option does not exist. """ return self._get_raw(option, '', default) def getint(self, option, default=None): """ Retrieve *option* from the config, returning *default* if it is not present. :param str option: The name of the value to return. :param default: Default value to return if the option does not exist. :rtype: int """ return self._get_raw(option, 'int', default) def getfloat(self, option, default=None): """ Retrieve *option* from the config, returning *default* if it is not present. :param str option: The name of the value to return. :param default: Default value to return if the option does not exist. :rtype: float """ return self._get_raw(option, 'float', default) def getboolean(self, option, default=None): """ Retrieve *option* from the config, returning *default* if it is not present. :param str option: The name of the value to return. :param default: Default value to return if the option does not exist. :rtype: bool """ return self._get_raw(option, 'boolean', default) def has_option(self, option): """ Check that *option* exists in the configuration file. :param str option: The name of the option to check. 
:rtype: bool """ return self.config_parser.has_option(self.section_name, option) def options(self): """ Get a list of all options that are present in the section of the configuration. :return: A list of all set options. :rtype: list """ return self.config_parser.options(self.section_name) def items(self): """ Return all options and their values in the form of a list of tuples. :return: A list of all values and options. :rtype: list """ return self.config_parser.items(self.section_name) def set(self, option, value): """ Set an option to an arbitrary value. :param str option: The name of the option to set. :param value: The value to set the option to. """ self.config_parser.set(self.section_name, option, value) class TestCase(unittest.TestCase): """ This class provides additional functionality over the built in :py:class:`unittest.TestCase` object, including better compatibility for methods across Python 2.x and Python 3.x. """ def __init__(self, *args, **kwargs): super(TestCase, self).__init__(*args, **kwargs) if not hasattr(self, 'assertRegex') and hasattr(self, 'assertRegexpMatches'): self.assertRegex = self.assertRegexpMatches if not hasattr(self, 'assertNotRegex') and hasattr(self, 'assertNotRegexpMatches'): self.assertNotRegex = self.assertNotRegexpMatches if not hasattr(self, 'assertRaisesRegex') and hasattr(self, 'assertRaisesRegexp'): self.assertRaisesRegex = self.assertRaisesRegexp def configure_stream_logger(logger='', level=None, formatter='%(levelname)-8s %(message)s'): """ Configure the default stream handler for logging messages to the console, remove other logging handlers, and enable capturing warnings. .. versionadded:: 1.3.0 :param str logger: The logger to add the stream handler for. :param level: The level to set the logger to, will default to WARNING if no level is specified. :type level: None, int, str :param formatter: The format to use for logging messages to the console. 
:type formatter: str, :py:class:`logging.Formatter` :return: The new configured stream handler. :rtype: :py:class:`logging.StreamHandler` """ level = level or logging.WARNING if isinstance(level, str): level = getattr(logging, level, None) if level is None: raise ValueError('invalid log level: ' + level) root_logger = logging.getLogger('') for handler in root_logger.handlers: root_logger.removeHandler(handler) logging.getLogger(logger).setLevel(logging.DEBUG) console_log_handler = logging.StreamHandler() console_log_handler.setLevel(level) if isinstance(formatter, str): formatter = logging.Formatter(formatter) elif not isinstance(formatter, logging.Formatter): raise TypeError('formatter must be an instance of logging.Formatter') console_log_handler.setFormatter(formatter) logging.getLogger(logger).addHandler(console_log_handler) logging.captureWarnings(True) return console_log_handler def download(url, filename=None): """ Download a file from a url and save it to disk. :param str url: The URL to fetch the file from. :param str filename: The destination file to write the data to. """ # requirements os, shutil, urllib.parse, urllib.request if not filename: url_parts = urllib.parse.urlparse(url) filename = os.path.basename(url_parts.path) url_h = urllib.request.urlopen(url) with open(filename, 'wb') as file_h: shutil.copyfileobj(url_h, file_h) url_h.close() return def escape_single_quote(unescaped): """ Escape a string containing single quotes and backslashes with backslashes. This is useful when a string is evaluated in some way. :param str unescaped: The string to escape. :return: The escaped string. :rtype: str """ # requirements = re return re.sub(r'(\'|\\)', r'\\\1', unescaped) def format_bytes_size(val): """ Take a number of bytes and convert it to a human readable number. :param int val: The number of bytes to format. :return: The size in a human readable format. 
:rtype: str """ if not val: return '0 bytes' for sz_name in ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']: if val < 1024.0: return "{0:.2f} {1}".format(val, sz_name) val /= 1024.0 raise OverflowError() def grep(expression, file, flags=0, invert=False): """ Search a file and return a list of all lines that match a regular expression. :param str expression: The regex to search for. :param file: The file to search in. :type file: str, file :param int flags: The regex flags to use when searching. :param bool invert: Select non matching lines instead. :return: All the matching lines. :rtype: list """ # requirements = re if isinstance(file, str): file = open(file) lines = [] for line in file: if bool(re.search(expression, line, flags=flags)) ^ invert: lines.append(line) return lines def is_valid_email_address(email_address): """ Check that the string specified appears to be a valid email address. :param str email_address: The email address to validate. :return: Whether the email address appears to be valid or not. :rtype: bool """ # requirements = re return EMAIL_REGEX.match(email_address) != None def get_ip_list(ip_network, mask=None): """ Quickly convert an IPv4 or IPv6 network (CIDR or Subnet) to a list of individual IPs in their string representation. :param str ip_network: :param int mask: :return: list """ if mask and '/' not in ip_network: net = ipaddress.ip_network("{0}/{1}".format(ip_network, mask)) elif '/' not in ip_network: return [str(ipaddress.ip_address(ip_network))] else: net = ipaddress.ip_network(ip_network) hosts = net.hosts() if net.netmask == ipaddress.IPv4Address('255.255.255.255') and sys.version_info > (3, 9): # see: https://github.com/zeroSteiner/smoke-zephyr/issues/8 hosts = [] return [host.__str__() for host in hosts] def sort_ipv4_list(ip_list, unique=True): """ Sorts a provided list of IPv4 addresses. 
Optionally can remove duplicate values Supports IPv4 addresses with ports included (ex: [10.11.12.13:80, 10.11.12.13:8080]) :param ip_list: (list) iterable of IPv4 Addresses :param unique: (bool) removes duplicate values if true :return: sorted list of IP addresses """ if unique: ip_list = list(set(ip_list)) ipv4_list = sorted([i.rstrip(':') for i in ip_list], key=lambda ip: ( int(ip.split(".")[0]), int(ip.split(".")[1]), int(ip.split(".")[2]), int(ip.split(".")[3].split(':')[0]), int(ip.split(":")[1]) if ":" in ip else 0 )) return ipv4_list def open_uri(uri): """ Open a URI in a platform intelligent way. On Windows this will use 'cmd.exe /c start' and on Linux this will use gvfs-open or xdg-open depending on which is available. If no suitable application can be found to open the URI, a RuntimeError will be raised. .. versionadded:: 1.3.0 :param str uri: The URI to open. """ close_fds = True startupinfo = None proc_args = [] if sys.platform.startswith('win'): proc_args.append(which('cmd.exe')) proc_args.append('/c') proc_args.append('start') uri = uri.replace('&', '^&') close_fds = False startupinfo = subprocess.STARTUPINFO() startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW startupinfo.wShowWindow = subprocess.SW_HIDE elif which('gvfs-open'): proc_args.append(which('gvfs-open')) elif which('xdg-open'): proc_args.append(which('xdg-open')) else: raise RuntimeError('could not find suitable application to open uri') proc_args.append(uri) proc_h = subprocess.Popen(proc_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=close_fds, startupinfo=startupinfo) return proc_h.wait() == 0 def parse_case_camel_to_snake(camel): """ Convert a string from CamelCase to snake_case. :param str camel: The CamelCase string to convert. :return: The snake_case version of string. 
:rtype: str """ # requirements = re return re.sub('((?<=[a-z0-9])[A-Z]|(?!^)[A-Z](?=[a-z]))', r'_\1', camel).lower() def parse_case_snake_to_camel(snake, upper_first=True): """ Convert a string from snake_case to CamelCase. :param str snake: The snake_case string to convert. :param bool upper_first: Whether or not to capitalize the first character of the string. :return: The CamelCase version of string. :rtype: str """ snake = snake.split('_') first_part = snake[0] if upper_first: first_part = first_part.title() return first_part + ''.join(word.title() for word in snake[1:]) def parse_server(server, default_port): """ Convert a server string to a tuple suitable for passing to connect, for example converting 'www.google.com:443' to ('www.google.com', 443). :param str server: The server string to convert. :param int default_port: The port to use in case one is not specified in the server string. :return: The parsed server information. :rtype: tuple """ server = server.rsplit(':', 1) host = server[0] if host.startswith('[') and host.endswith(']'): host = host[1:-1] if len(server) == 1: return (host, default_port) port = server[1] if not port: port = default_port else: port = int(port) return (host, port) def parse_timespan(timedef): """ Convert a string timespan definition to seconds, for example converting '1m30s' to 90. If *timedef* is already an int, the value will be returned unmodified. :param timedef: The timespan definition to convert to seconds. :type timedef: int, str :return: The converted value in seconds. 
:rtype: int """ if isinstance(timedef, int): return timedef converter_order = ('w', 'd', 'h', 'm', 's') converters = { 'w': 604800, 'd': 86400, 'h': 3600, 'm': 60, 's': 1 } timedef = timedef.lower() if timedef.isdigit(): return int(timedef) elif len(timedef) == 0: return 0 seconds = -1 for spec in converter_order: timedef = timedef.split(spec) if len(timedef) == 1: timedef = timedef[0] continue elif len(timedef) > 2 or not timedef[0].isdigit(): seconds = -1 break adjustment = converters[spec] seconds = max(seconds, 0) seconds += (int(timedef[0]) * adjustment) timedef = timedef[1] if not len(timedef): break if seconds < 0: raise ValueError('invalid time format') return seconds def parse_to_slug(words, maxlen=24): """ Parse a string into a slug format suitable for use in URLs and other character restricted applications. Only utf-8 strings are supported at this time. :param str words: The words to parse. :param int maxlen: The maximum length of the slug. :return: The parsed words as a slug. :rtype: str """ slug = '' maxlen = min(maxlen, len(words)) for c in words: if len(slug) == maxlen: break c = ord(c) if c == 0x27: continue elif c >= 0x30 and c <= 0x39: slug += chr(c) elif c >= 0x41 and c <= 0x5a: slug += chr(c + 0x20) elif c >= 0x61 and c <= 0x7a: slug += chr(c) elif len(slug) and slug[-1] != '-': slug += '-' if len(slug) and slug[-1] == '-': slug = slug[:-1] return slug def random_string_alphanumeric(size): """ Generate a random string of *size* length consisting of mixed case letters and numbers. This function is not meant for cryptographic purposes. :param int size: The length of the string to return. :return: A string consisting of random characters. :rtype: str """ # requirements = random, string return ''.join(random.choice(string.ascii_letters + string.digits) for x in range(size)) def random_string_lower_numeric(size): """ Generate a random string of *size* length consisting of lowercase letters and numbers. 
This function is not meant for cryptographic purposes. :param int size: The length of the string to return. :return: A string consisting of random characters. :rtype: str """ # requirements = random, string return ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(size)) def selection_collision(selections, poolsize): """ Calculate the probability that two random values selected from an arbitrary sized pool of unique values will be equal. This is commonly known as the "Birthday Problem". :param int selections: The number of random selections. :param int poolsize: The number of unique random values in the pool to choose from. :rtype: float :return: The chance that a collision will occur as a percentage. """ # requirments = sys probability = 100.0 poolsize = float(poolsize) for i in range(selections): probability = probability * (poolsize - i) / poolsize probability = (100.0 - probability) return probability def unescape_single_quote(escaped): """ Unescape a string which uses backslashes to escape single quotes. :param str escaped: The string to unescape. :return: The unescaped string. :rtype: str """ escaped = escaped.replace('\\\\', '\\') escaped = escaped.replace('\\\'', '\'') return escaped def unique(seq, key=None): """ Create a unique list or tuple from a provided list or tuple and preserve the order. :param seq: The list or tuple to preserve unique items from. :type seq: list, tuple :param key: If key is provided it will be called during the comparison process. :type key: function, None """ if key is None: key = lambda x: x preserved_type = type(seq) if preserved_type not in (list, tuple): raise TypeError("unique argument 1 must be list or tuple, not {0}".format(preserved_type.__name__)) seen = [] result = [] for item in seq: marker = key(item) if marker in seen: continue seen.append(marker) result.append(item) return preserved_type(result) def weighted_choice(choices, weight): """ Make a random selection from the specified choices. 
Apply the *weight* function to each to return a positive integer representing shares of selection pool the choice should received. The *weight* function is passed a single argument of the choice from the *choices* iterable. :param choices: The choices to select from. :type choices: list, tuple :param weight: The function used for gather weight information for choices. :type weight: function :return: A randomly selected choice from the provided *choices*. """ # requirements = random weights = [] # get weight values for each of the choices for choice in choices: choice_weight = weight(choice) if not (isinstance(choice_weight, int) and choice_weight > 0): raise TypeError('weight results must be positive integers') weights.append(choice_weight) # make a selection within the acceptable range selection = random.randint(0, sum(weights) - 1) # find and return the corresponding choice for idx, choice in enumerate(choices): if selection < sum(weights[:idx + 1]): return choice raise RuntimeError('no selection could be made') def which(program): """ Locate an executable binary's full path by its name. :param str program: The executables name. :return: The full path to the executable. :rtype: str """ # requirements = os is_exe = lambda fpath: (os.path.isfile(fpath) and os.access(fpath, os.X_OK)) for path in os.environ['PATH'].split(os.pathsep): path = path.strip('"') exe_file = os.path.join(path, program) if is_exe(exe_file): return exe_file if is_exe(program): return os.path.abspath(program) return None def xfrange(start, stop=None, step=1): """ Iterate through an arithmetic progression. :param start: Starting number. :type start: float, int, long :param stop: Stopping number. :type stop: float, int, long :param step: Stepping size. 
:type step: float, int, long """ if stop is None: stop = start start = 0.0 start = float(start) while start < stop: yield start start += step smoke-zephyr-2.0.1/tests/000077500000000000000000000000001376544576500153375ustar00rootroot00000000000000smoke-zephyr-2.0.1/tests/__init__.py000066400000000000000000000034601376544576500174530ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # tests/__init__.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# import logging from .argparse_types import ArgparseTypeTests from .job import JobManagerTests from .utilities import UtilitiesTests from .utilities import UtilitiesCacheTests if hasattr(logging, 'NullHandler'): logging.getLogger('').addHandler(logging.NullHandler()) smoke-zephyr-2.0.1/tests/argparse_types.py000066400000000000000000000074361376544576500207530ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # tests/argparse_types.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# import argparse import logging import os import unittest from smoke_zephyr import argparse_types from smoke_zephyr import utilities class ArgparseTypeTests(utilities.TestCase): def _invalid_argparse_type(self, function, invalid): with self.assertRaises(argparse.ArgumentTypeError): function(invalid) def _valid_argparse_type(self, function, valid, valid_result=None, valid_result_type=None): valid_result = valid if valid_result == None else valid_result valid_result_type = valid_result_type or str result = function(valid) self.assertEqual(result, valid_result) self.assertIsInstance(result, valid_result_type) def test_bin_b64_type(self): self._invalid_argparse_type(argparse_types.bin_b64_type, '0') self._valid_argparse_type(argparse_types.bin_b64_type, 'SGVsbG8gV29ybGQh', b'Hello World!', bytes) def test_bin_hex_type(self): self._invalid_argparse_type(argparse_types.bin_hex_type, 'FAKE') self._valid_argparse_type(argparse_types.bin_hex_type, '48656c6c6f20576f726c6421', b'Hello World!', bytes) def test_dir_type(self): self._invalid_argparse_type(argparse_types.dir_type, 'FAKE') self._valid_argparse_type(argparse_types.dir_type, os.getcwd(), os.getcwd()) self._valid_argparse_type(argparse_types.dir_type, '.', '.') def test_log_level_type(self): self._invalid_argparse_type(argparse_types.log_level_type, 'FAKE') for level_name in ('NOTSET', 'DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'): self._valid_argparse_type(argparse_types.log_level_type, level_name, getattr(logging, level_name), int) def test_port_type(self): self._invalid_argparse_type(argparse_types.port_type, 'FAKE') self._invalid_argparse_type(argparse_types.port_type, '65536') self._valid_argparse_type(argparse_types.port_type, '80', 80, int) def test_timespan_type(self): self._invalid_argparse_type(argparse_types.timespan_type, 'FAKE') self._invalid_argparse_type(argparse_types.timespan_type, '30x') self._valid_argparse_type(argparse_types.timespan_type, '80', 80, int) 
self._valid_argparse_type(argparse_types.timespan_type, '1m', 60, int) self._valid_argparse_type(argparse_types.timespan_type, '1h', 3600, int) self._valid_argparse_type(argparse_types.timespan_type, '1h1m', 3660, int) if __name__ == '__main__': unittest.main() smoke-zephyr-2.0.1/tests/job.py000066400000000000000000000075411376544576500164720ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # tests/job.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# import contextlib import time import unittest import uuid from smoke_zephyr import job from smoke_zephyr import utilities ROUTINE_SLEEP_TIME = 1.5 def test_routine(): time.sleep(ROUTINE_SLEEP_TIME) def test_routine_delete(): return job.JobRequestDelete() class JobManagerTests(utilities.TestCase): def setUp(self): self.assertGreater(ROUTINE_SLEEP_TIME, 1) self.jm = job.JobManager() self.jm.start() def tearDown(self): self.jm.stop() @contextlib.contextmanager def _job_add(self, callback, parameters=None, expiration=1, wait=True): jid = self.jm.job_add(callback, parameters, seconds=1, expiration=expiration) self.assertIsInstance(jid, uuid.UUID) self.assertTrue(self.jm.job_exists(jid)) self.assertEqual(self.jm.job_count(), 1) self.assertEqual(self.jm.job_count_enabled(), 1) yield jid if wait: time.sleep(ROUTINE_SLEEP_TIME * 2) def test_job_init(self): self.assertEqual(self.jm.job_count(), 0) self.assertEqual(self.jm.job_count_enabled(), 0) def test_job_add(self): test_list = [] data = utilities.random_string_alphanumeric(10) with self._job_add(test_list.append, data) as jid: self.assertEqual(len(test_list), 0) self.assertEqual(len(test_list), 1) self.assertIn(data, test_list) self.assertFalse(self.jm.job_exists(jid)) def test_job_delete(self): with self._job_add(test_routine, wait=False) as jid: self.jm.job_delete(jid) self.assertEqual(self.jm.job_count(), 0) self.assertEqual(self.jm.job_count_enabled(), 0) def test_job_disable(self): with self._job_add(test_routine, wait=False) as jid: self.jm.job_disable(jid) self.assertEqual(self.jm.job_count(), 1) self.assertEqual(self.jm.job_count_enabled(), 0) def test_job_request_delete(self): with self._job_add(test_routine_delete) as jid: self.assertTrue(self.jm.job_exists(jid)) result = self.jm.job_exists(jid) self.assertFalse(result) self.assertEqual(self.jm.job_count(), 0) self.assertEqual(self.jm.job_count_enabled(), 0) def test_job_run(self): jid = self.jm.job_run(test_routine) self.assertIsInstance(jid, uuid.UUID) 
self.assertTrue(self.jm.job_is_running(jid)) self.assertEqual(self.jm.job_count(), 1) self.assertEqual(self.jm.job_count_enabled(), 1) time.sleep(ROUTINE_SLEEP_TIME * 2) self.assertFalse(self.jm.job_is_running(jid)) if __name__ == '__main__': unittest.main() smoke-zephyr-2.0.1/tests/utilities.py000066400000000000000000000175361376544576500177400ustar00rootroot00000000000000#!/usr/bin/env python # -*- coding: utf-8 -*- # # tests/utilities.py # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of the project nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# import collections import unittest from smoke_zephyr import utilities SINGLE_QUOTE_STRING_ESCAPED = """C:\\\\Users\\\\Alice\\\\Desktop\\\\Alice\\'s Secret File.txt""" SINGLE_QUOTE_STRING_UNESCAPED = """C:\\Users\\Alice\\Desktop\\Alice's Secret File.txt""" def cache_test(first_name, last_name, email=None, dob=None): return utilities.random_string_alphanumeric(24) class UtilitiesCacheTests(utilities.TestCase): def test_cache(self): target_function = utilities.Cache('6h')(cache_test) result_alice = target_function('alice', 'liddle') self.assertEqual(target_function('alice', 'liddle'), result_alice) result_calie = target_function('calie', 'liddle') self.assertEqual(target_function('calie', 'liddle'), result_calie) self.assertNotEqual(result_alice, result_calie) result_alice = target_function('alice', 'liddle', email='aliddle@wonderland.com') self.assertEqual(target_function('alice', 'liddle', email='aliddle@wonderland.com'), result_alice) self.assertNotEqual(result_alice, result_calie) def test_cache_cache_clear(self): target_function = utilities.Cache('6h')(cache_test) result_alice = target_function('alice', 'liddle') target_function.cache_clear() self.assertNotEqual(target_function('alice', 'liddle'), result_alice) def test_cache_flatten_args(self): target_function = utilities.Cache('6h')(cache_test) flatten_args = target_function._flatten_args # pylint: disable=W0212 self.assertEqual( flatten_args(('alice',), {'last_name': 'liddle'}), collections.deque(('alice', 'liddle', None, None)) ) self.assertEqual( flatten_args(('alice',), {'last_name': 'liddle', 'email': 'aliddle@wonderland.com'}), collections.deque(('alice', 'liddle', 'aliddle@wonderland.com', None)) ) self.assertEqual( flatten_args(('alice', 'liddle'), {}), collections.deque(('alice', 'liddle', None, None)) ) self.assertEqual( flatten_args(('alice', 'liddle'), {}), collections.deque(('alice', 'liddle', None, None)) ) self.assertEqual( flatten_args(('alice', 'liddle', 'aliddle@wonderland.com'), {}), 
collections.deque(('alice', 'liddle', 'aliddle@wonderland.com', None)) ) self.assertEqual( flatten_args(('alice', 'liddle'), {'dob': '1990'}), collections.deque(('alice', 'liddle', None, '1990')) ) with self.assertRaisesRegex(TypeError, r'^cache_test\(\) missing required argument \'last_name\'$'): flatten_args(('alice',), {}) with self.assertRaisesRegex(TypeError, r'^cache_test\(\) got an unexpected keyword argument \'foobar\'$'): flatten_args(('alice', 'liddle'), {'foobar': True}) class UtilitiesTests(utilities.TestCase): def test_attribute_dict(self): ad = utilities.AttributeDict(test=1) self.assertIsInstance(ad, utilities.AttributeDict) self.assertEqual(ad['test'], ad.test) self.assertEqual(ad.test, 1) def test_escape_single_quote(self): escaped_string = utilities.escape_single_quote(SINGLE_QUOTE_STRING_UNESCAPED) self.assertEqual(escaped_string, SINGLE_QUOTE_STRING_ESCAPED) def test_get_ip_list(self): cases = { ('192.168.1.0', None): ['192.168.1.0'], ('192.168.2.0/32', None): [], ('192.168.3.0/30', None): ['192.168.3.1', '192.168.3.2'], ('192.168.4.0', 32): [], ('192.168.5.0', 30): ['192.168.5.1', '192.168.5.2'], } for (ip_network, mask), ip_list in cases.items(): returned_ip_list = utilities.get_ip_list(ip_network, mask=mask) self.assertEqual(returned_ip_list, ip_list, msg=("get_ip_list({!r}, mask={!r}) != {!r}".format(ip_network, mask, ip_list))) def test_is_valid_email_address(self): valid_emails = [ 'aliddle@wonderland.com', 'aliddle@wonderland.co.uk', 'alice.liddle1+spam@wonderland.com', ] invalid_emails = [ 'aliddle.wonderland.com' 'aliddle+', 'aliddle@', 'aliddle', '', '@wonderland.com', '@wonder@land.com', 'aliddle@.com' ] for address in valid_emails: self.assertTrue(utilities.is_valid_email_address(address)) for address in invalid_emails: self.assertFalse(utilities.is_valid_email_address(address)) def test_parse_case_camel_to_snake(self): parsed = utilities.parse_case_camel_to_snake('SmokeZephyr') self.assertEqual(parsed, 'smoke_zephyr') def 
test_parse_case_snake_to_camel(self): parsed = utilities.parse_case_snake_to_camel('smoke_zephyr') self.assertEqual(parsed, 'SmokeZephyr') parsed = utilities.parse_case_snake_to_camel('smoke_zephyr', False) self.assertEqual(parsed, 'smokeZephyr') def test_parse_server(self): parsed = utilities.parse_server('127.0.0.1', 80) self.assertIsInstance(parsed, tuple) self.assertEqual(len(parsed), 2) self.assertEqual(parsed[0], '127.0.0.1') self.assertEqual(parsed[1], 80) parsed = utilities.parse_server('127.0.0.1:8080', 80) self.assertIsInstance(parsed, tuple) self.assertEqual(len(parsed), 2) self.assertEqual(parsed[0], '127.0.0.1') self.assertEqual(parsed[1], 8080) parsed = utilities.parse_server('[::1]:8080', 80) self.assertIsInstance(parsed, tuple) self.assertEqual(len(parsed), 2) self.assertEqual(parsed[0], '::1') self.assertEqual(parsed[1], 8080) def test_parse_timespan(self): self.assertRaises(ValueError, utilities.parse_timespan, 'fake') self.assertEqual(utilities.parse_timespan(''), 0) self.assertEqual(utilities.parse_timespan('30'), 30) self.assertEqual(utilities.parse_timespan('1m30s'), 90) self.assertEqual(utilities.parse_timespan('2h1m30s'), 7290) self.assertEqual(utilities.parse_timespan('3d2h1m30s'), 266490) def test_parse_to_slug(self): parsed = utilities.parse_to_slug('Smoke Zephyr!') self.assertEqual(parsed, 'smoke-zephyr') parsed = utilities.parse_to_slug('_Smoke Zephyr! 
(Next Try)') self.assertEqual(parsed, 'smoke-zephyr-next-try') def test_selection_collision(self): chance = utilities.selection_collision(30, 365) self.assertAlmostEqual(chance, 70.6316243) def test_sort_ipv4_list(self): cases = [ (['9.8.7.6', '1.2.3.4'], ['1.2.3.4', '9.8.7.6']), (['11.22.33.44', '2.3.4.5'], ['2.3.4.5', '11.22.33.44']), ] for in_list, out_list in cases: self.assertEquals(utilities.sort_ipv4_list(in_list), out_list) self.assertEquals(utilities.sort_ipv4_list(['1.2.3.4', '1.2.3.4'], unique=True), ['1.2.3.4']) def test_unescape_single_quote(self): unescaped_string = utilities.unescape_single_quote(SINGLE_QUOTE_STRING_ESCAPED) self.assertEqual(unescaped_string, SINGLE_QUOTE_STRING_UNESCAPED) if __name__ == '__main__': unittest.main()