pax_global_header00006660000000000000000000000064136560142700014516gustar00rootroot0000000000000052 comment=010f24e90da8316ed6dd22f6fd6c1eb5f3c47cf1 pebble-4.5.3/000077500000000000000000000000001365601427000127605ustar00rootroot00000000000000pebble-4.5.3/.gitignore000066400000000000000000000005341365601427000147520ustar00rootroot00000000000000*.py[cod] # C extensions *.so # Packages *.egg *.egg-info dist *build* *eggs* parts bin var sdist develop-eggs .installed.cfg lib lib64 __pycache__ # Installer logs pip-log.txt # Unit test / coverage reports .coverage .tox nosetests.xml # Translations *.mo # Mr Developer .mr.developer.cfg .project .pydevproject # Virtual environment venv/ pebble-4.5.3/.travis.yml000066400000000000000000000020741365601427000150740ustar00rootroot00000000000000dist: xenial language: python matrix: include: - os: linux sudo: required python: 2.7 - os: linux sudo: required python: 3.4 - os: linux sudo: required python: 3.5 - os: linux sudo: required python: 3.6 - os: linux sudo: required python: 3.7 - os: linux sudo: required python: 3.8 - os: linux sudo: required python: &pypy2 pypy2.7-6.0 - os: linux sudo: required python: &pypy3 pypy3.5-6.0 # - os: osx # language: generic # before_install: # - virtualenv env -p python # - source env/bin/activate # - os: osx # language: generic # before_install: # - brew update # - brew upgrade python # - pip install --upgrade virtualenv # - virtualenv env -p python # - source env/bin/activate install: - pip install --upgrade pip - pip install --upgrade setuptools - pip install --upgrade pytest - pip install . script: ./test/run-tests.sh branches: only: - master pebble-4.5.3/LICENSE000066400000000000000000000167201365601427000137730ustar00rootroot00000000000000GNU LESSER GENERAL PUBLIC LICENSE Version 3, 29 June 2007 Copyright (C) 2007 Free Software Foundation, Inc. Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. This version of the GNU Lesser General Public License incorporates the terms and conditions of version 3 of the GNU General Public License, supplemented by the additional permissions listed below. 0. Additional Definitions. As used herein, "this License" refers to version 3 of the GNU Lesser General Public License, and the "GNU GPL" refers to version 3 of the GNU General Public License. "The Library" refers to a covered work governed by this License, other than an Application or a Combined Work as defined below. An "Application" is any work that makes use of an interface provided by the Library, but which is not otherwise based on the Library. Defining a subclass of a class defined by the Library is deemed a mode of using an interface provided by the Library. A "Combined Work" is a work produced by combining or linking an Application with the Library. The particular version of the Library with which the Combined Work was made is also called the "Linked Version". The "Minimal Corresponding Source" for a Combined Work means the Corresponding Source for the Combined Work, excluding any source code for portions of the Combined Work that, considered in isolation, are based on the Application, and not on the Linked Version. The "Corresponding Application Code" for a Combined Work means the object code and/or source code for the Application, including any data and utility programs needed for reproducing the Combined Work from the Application, but excluding the System Libraries of the Combined Work. 1. Exception to Section 3 of the GNU GPL. You may convey a covered work under sections 3 and 4 of this License without being bound by section 3 of the GNU GPL. 2. Conveying Modified Versions. If you modify a copy of the Library, and, in your modifications, a facility refers to a function or data to be supplied by an Application that uses the facility (other than as an argument passed when the facility is invoked), then you may convey a copy of the modified version: a) under this License, provided that you make a good faith effort to ensure that, in the event an Application does not supply the function or data, the facility still operates, and performs whatever part of its purpose remains meaningful, or b) under the GNU GPL, with none of the additional permissions of this License applicable to that copy. 3. Object Code Incorporating Material from Library Header Files. The object code form of an Application may incorporate material from a header file that is part of the Library. You may convey such object code under terms of your choice, provided that, if the incorporated material is not limited to numerical parameters, data structure layouts and accessors, or small macros, inline functions and templates (ten or fewer lines in length), you do both of the following: a) Give prominent notice with each copy of the object code that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the object code with a copy of the GNU GPL and this license document. 4. Combined Works. You may convey a Combined Work under terms of your choice that, taken together, effectively do not restrict modification of the portions of the Library contained in the Combined Work and reverse engineering for debugging such modifications, if you also do each of the following: a) Give prominent notice with each copy of the Combined Work that the Library is used in it and that the Library and its use are covered by this License. b) Accompany the Combined Work with a copy of the GNU GPL and this license document. c) For a Combined Work that displays copyright notices during execution, include the copyright notice for the Library among these notices, as well as a reference directing the user to the copies of the GNU GPL and this license document. d) Do one of the following: 0) Convey the Minimal Corresponding Source under the terms of this License, and the Corresponding Application Code in a form suitable for, and under terms that permit, the user to recombine or relink the Application with a modified version of the Linked Version to produce a modified Combined Work, in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source. 1) Use a suitable shared library mechanism for linking with the Library. A suitable mechanism is one that (a) uses at run time a copy of the Library already present on the user's computer system, and (b) will operate properly with a modified version of the Library that is interface-compatible with the Linked Version. e) Provide Installation Information, but only if you would otherwise be required to provide such information under section 6 of the GNU GPL, and only to the extent that such information is necessary to install and execute a modified version of the Combined Work produced by recombining or relinking the Application with a modified version of the Linked Version. (If you use option 4d0, the Installation Information must accompany the Minimal Corresponding Source and Corresponding Application Code. If you use option 4d1, you must provide the Installation Information in the manner specified by section 6 of the GNU GPL for conveying Corresponding Source.) 5. Combined Libraries. You may place library facilities that are a work based on the Library side by side in a single library together with other library facilities that are not Applications and are not covered by this License, and convey such a combined library under terms of your choice, if you do both of the following: a) Accompany the combined library with a copy of the same work based on the Library, uncombined with any other library facilities, conveyed under the terms of this License. b) Give prominent notice with the combined library that part of it is a work based on the Library, and explaining where to find the accompanying uncombined form of the same work. 6. Revised Versions of the GNU Lesser General Public License. The Free Software Foundation may publish revised and/or new versions of the GNU Lesser General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Library as you received it specifies that a certain numbered version of the GNU Lesser General Public License "or any later version" applies to it, you have the option of following the terms and conditions either of that published version or of any later version published by the Free Software Foundation. If the Library as you received it does not specify a version number of the GNU Lesser General Public License, you may choose any version of the GNU Lesser General Public License ever published by the Free Software Foundation. If the Library as you received it specifies that a proxy can decide whether future versions of the GNU Lesser General Public License shall apply, that proxy's public statement of acceptance of any version is permanent authorization for you to choose that version for the Library. pebble-4.5.3/MANIFEST.in000066400000000000000000000000321365601427000145110ustar00rootroot00000000000000include version.py LICENSEpebble-4.5.3/README.rst000066400000000000000000000043711365601427000144540ustar00rootroot00000000000000Pebble ====== Pebble provides a neat API to manage threads and processes within an application. :Source: https://github.com/noxdafox/pebble :Documentation: https://pebble.readthedocs.io :Download: https://pypi.python.org/pypi/pebble |travis badge| |docs badge| .. |travis badge| image:: https://travis-ci.org/noxdafox/pebble.svg?branch=master :target: https://travis-ci.org/noxdafox/pebble :alt: Build Status .. |docs badge| image:: https://readthedocs.org/projects/pebble/badge/?version=latest :target: https://pebble.readthedocs.io :alt: Documentation Status Examples -------- Run a job in a separate thread and wait for its results. .. code:: python from pebble import concurrent @concurrent.thread def function(foo, bar=0): return foo + bar future = function(1, bar=2) result = future.result() # blocks until results are ready Run a function with a timeout of ten seconds and deal with errors. .. code:: python from pebble import concurrent from concurrent.futures import TimeoutError @concurrent.process(timeout=10) def function(foo, bar=0): return foo + bar future = function(1, bar=2) try: result = future.result() # blocks until results are ready except TimeoutError as error: print("Function took longer than %d seconds" % error.args[1]) except Exception as error: print("Function raised %s" % error) print(error.traceback) # traceback of the function Pools support workers restart, timeout for long running tasks and more. .. code:: python from pebble import ProcessPool from concurrent.futures import TimeoutError def function(foo, bar=0): return foo + bar def task_done(future): try: result = future.result() # blocks until results are ready except TimeoutError as error: print("Function took longer than %d seconds" % error.args[1]) except Exception as error: print("Function raised %s" % error) print(error.traceback) # traceback of the function with ProcessPool(max_workers=5, max_tasks=10) as pool: for i in range(0, 10): future = pool.schedule(function, args=[i], timeout=3) future.add_done_callback(task_done) pebble-4.5.3/doc/000077500000000000000000000000001365601427000135255ustar00rootroot00000000000000pebble-4.5.3/doc/Makefile000066400000000000000000000166661365601427000152040ustar00rootroot00000000000000# Makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build PAPER = BUILDDIR = _build # Internal variables. PAPEROPT_a4 = -D latex_paper_size=a4 PAPEROPT_letter = -D latex_paper_size=letter ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . # the i18n builder cannot share the environment and doctrees with the others I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . .PHONY: help help: @echo "Please use \`make ' where is one of" @echo " html to make standalone HTML files" @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" @echo " pickle to make pickle files" @echo " json to make JSON files" @echo " htmlhelp to make HTML files and a HTML help project" @echo " qthelp to make HTML files and a qthelp project" @echo " applehelp to make an Apple Help Book" @echo " devhelp to make HTML files and a Devhelp project" @echo " epub to make an epub" @echo " epub3 to make an epub3" @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" @echo " latexpdf to make LaTeX files and run them through pdflatex" @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" @echo " text to make text files" @echo " man to make manual pages" @echo " texinfo to make Texinfo files" @echo " info to make Texinfo files and run them through makeinfo" @echo " gettext to make PO message catalogs" @echo " changes to make an overview of all changed/added/deprecated items" @echo " xml to make Docutils-native XML files" @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" @echo " dummy to check syntax errors of document sources" .PHONY: clean clean: rm -rf $(BUILDDIR)/* .PHONY: html html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." .PHONY: dirhtml dirhtml: $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." .PHONY: singlehtml singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." .PHONY: pickle pickle: $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle @echo @echo "Build finished; now you can process the pickle files." .PHONY: json json: $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json @echo @echo "Build finished; now you can process the JSON files." .PHONY: htmlhelp htmlhelp: $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp @echo @echo "Build finished; now you can run HTML Help Workshop with the" \ ".hhp project file in $(BUILDDIR)/htmlhelp." .PHONY: qthelp qthelp: $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Pebble.qhcp" @echo "To view the help file:" @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Pebble.qhc" .PHONY: applehelp applehelp: $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp @echo @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." @echo "N.B. You won't be able to view it unless you put it in" \ "~/Library/Documentation/Help or install it in your application" \ "bundle." .PHONY: devhelp devhelp: $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp @echo @echo "Build finished." @echo "To view the help file:" @echo "# mkdir -p $$HOME/.local/share/devhelp/Pebble" @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Pebble" @echo "# devhelp" .PHONY: epub epub: $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub @echo @echo "Build finished. The epub file is in $(BUILDDIR)/epub." .PHONY: epub3 epub3: $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 @echo @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." .PHONY: latex latex: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." @echo "Run \`make' in that directory to run these through (pdf)latex" \ "(use \`make latexpdf' here to do that automatically)." .PHONY: latexpdf latexpdf: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through pdflatex..." $(MAKE) -C $(BUILDDIR)/latex all-pdf @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: latexpdfja latexpdfja: $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex @echo "Running LaTeX files through platex and dvipdfmx..." $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." .PHONY: text text: $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text @echo @echo "Build finished. The text files are in $(BUILDDIR)/text." .PHONY: man man: $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man @echo @echo "Build finished. The manual pages are in $(BUILDDIR)/man." .PHONY: texinfo texinfo: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." @echo "Run \`make' in that directory to run these through makeinfo" \ "(use \`make info' here to do that automatically)." .PHONY: info info: $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo @echo "Running Texinfo files through makeinfo..." make -C $(BUILDDIR)/texinfo info @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." .PHONY: gettext gettext: $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale @echo @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." .PHONY: changes changes: $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes @echo @echo "The overview file is in $(BUILDDIR)/changes." .PHONY: linkcheck linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @echo @echo "Link check complete; look for any errors in the above output " \ "or in $(BUILDDIR)/linkcheck/output.txt." .PHONY: doctest doctest: $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest @echo "Testing of doctests in the sources finished, look at the " \ "results in $(BUILDDIR)/doctest/output.txt." .PHONY: coverage coverage: $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." .PHONY: xml xml: $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml @echo @echo "Build finished. The XML files are in $(BUILDDIR)/xml." .PHONY: pseudoxml pseudoxml: $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml @echo @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." .PHONY: dummy dummy: $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy @echo @echo "Build finished. Dummy builder generates no files." pebble-4.5.3/doc/conf.py000066400000000000000000000237771365601427000150440ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # Pebble documentation build configuration file, created by # sphinx-quickstart on Sun Aug 28 22:21:49 2016. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import fileinput # import sys # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The encoding of source files. # # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Pebble' copyright = u'2013-2020, Matteo Cafasso' author = u'Matteo Cafasso' CWD = os.path.dirname(__file__) def package_version(): module_path = os.path.join(CWD, '..', 'pebble', '__init__.py') for line in fileinput.input(module_path): if line.startswith('__version__'): return line.split('=')[-1].strip().replace('\'', '') # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = package_version() # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # # today = '' # # Else, today_fmt is used as the format for a strftime call. # # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # The reST default role (used for this markup: `text`) to use for all # documents. # # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'alabaster' html_theme_options = { 'page_width': '80%', 'github_user': 'noxdafox', 'github_repo': 'pebble', 'show_related': True, 'github_banner': True } # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. # " v documentation" by default. # # html_title = u'Pebble v3.1.15' # A shorter title for the navigation bar. Default is the same as html_title. # # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # # html_logo = None # The name of an image file (relative to this directory) to use as a favicon of # the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # # html_extra_path = [] # If not None, a 'Last updated on:' timestamp is inserted at every page # bottom, using the given strftime format. # The empty string is equivalent to '%b %d, %Y'. # # html_last_updated_fmt = None # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # # html_additional_pages = {} # If false, no module index is generated. # # html_domain_indices = True # If false, no index is generated. # # html_use_index = True # If true, the index is split into individual pages for each letter. # # html_split_index = False # If true, links to the reST sources are added to the pages. # # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Language to be used for generating the HTML full-text search index. # Sphinx supports the following languages: # 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' # 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh' # # html_search_language = 'en' # A dictionary with options for the search language support, empty by default. # 'ja' uses this config value. # 'zh' user can custom change `jieba` dictionary path. # # html_search_options = {'type': 'default'} # The name of a javascript file (relative to the configuration directory) that # implements a search results scorer. If empty, the default will be used. # # html_search_scorer = 'scorer.js' # Output file base name for HTML help builder. htmlhelp_basename = 'Pebbledoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'Pebble.tex', u'Pebble Documentation', u'Matteo Cafasso', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # # latex_use_parts = False # If true, show page references after internal links. # # latex_show_pagerefs = False # If true, show URL addresses after external links. # # latex_show_urls = False # Documents to append as an appendix to all manuals. # # latex_appendices = [] # It false, will not define \strong, \code, itleref, \crossref ... but only # \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added # packages. # # latex_keep_old_macro_names = True # If false, no module index is generated. # # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'pebble', u'Pebble Documentation', [author], 1) ] # If true, show URL addresses after external links. # # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'Pebble', u'Pebble Documentation', author, 'Pebble', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # # texinfo_appendices = [] # If false, no module index is generated. # # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. # # texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # # texinfo_no_detailmenu = False pebble-4.5.3/doc/index.rst000066400000000000000000000414171365601427000153750ustar00rootroot00000000000000.. Pebble documentation master file, created by sphinx-quickstart on Thu Oct 17 23:52:22 2013. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. Pebble ====== .. only:: html :Release: |release| :Date: |today| Modern languages should natively support concurrency, threading and synchronization primitives. Their usage should be the most intuitive possible, yet allowing all the required flexibility. Pebble aims to help managing threads and processes in an easier way. It wraps Python's standard library threading and multiprocessing objects. `Concurrent Module` ------------------- .. decorator:: concurrent.process(timeout=None, name=None, daemon=True) Runs the decorated function in a concurrent process, taking care of the results and error management. The decorated function will return a pebble.ProcessFuture_ object. If *timeout* is set, the process will be stopped once expired and the future object will raise a *concurrent.futures.TimeoutError* exception. The *name* parameter let you define the process name. The *daemon* parameter switches between daemon and non-daemon threads. .. decorator:: concurrent.thread(name=None, daemon=True) Runs the decorated function in a concurrent thread, taking care of the results and error management. The decorated function will return a concurrent.futures.Future_ object. The *name* parameter let you define the thread name. The *daemon* parameter switches between daemon and non-daemon threads. `Pebble Module` --------------- .. class:: pebble.ProcessPool(max_workers=multiprocessing.cpu_count(), max_tasks=0, initializer=None, initargs=None) A Pool allows to schedule jobs into a Pool of Processes which will perform them concurrently. Process pools work as well as a *context manager*. *max_workers* is an integer representing the amount of desired process workers managed by the pool. If *max_tasks* is a number greater than zero, each worker will be restarted after performing an equal amount of tasks. *initializer* must be callable, if passed, it will be called every time a worker is started, receiving *initargs* as arguments. .. data:: active True if the Pool is running, false otherwise. .. function:: schedule(function, args=(), kwargs={}, timeout=None) Schedule a job within the Pool. Returns a pebble.ProcessFuture_ object representing the execution of the callable. *function* is the function which is about to be scheduled. *args* and *kwargs* will be passed to the function respectively as its arguments and keyword arguments. *timeout* is an integer or a float. If given, once expired it will force the timed out task to be interrupted and the worker will be restarted. *Future.result()* will raise *TimeoutError*, callbacks will be executed. .. function:: map(function, *iterables, chunksize=1, timeout=None) Concurrently compute the *function* using arguments from each of the iterables. Stop when the shortest iterable is exhausted. *chunksize* controls the size of the chunks the iterables will be broken into before being passed to the function. *timeout* is an integer or a float. If given, it will be assigned to chunk of the iterables. If the computation of the given chunk will last longer than *timeout*, its execution will be terminated and iterating over its result will raise *TimeoutError*. A pebble.ProcessMapFuture_ object is returned. Its *result* method will return an iterable containing the results of the computation in the same order as they were given. .. function:: close() No more job will be allowed into the Pool, queued jobs will be consumed. To ensure all the jobs are performed call *ProcessPool.join()* just after closing the Pool. .. function:: stop() The Pool will be stopped abruptly. All enqueued and running jobs will be lost. To ensure the Pool to be released call *ProcessPool.join()* after stopping the Pool. .. function:: join(timeout=None) Waits for all workers to exit, must not be called before calling either *close()* or *stop()*. If *timeout* is set and some worker is still running after it expired, a TimeoutError will be raised. The *join* function must be called only in the main loop. Calling it in a pebble.ProcessFuture_ callback will result in a deadlock. .. class:: pebble.ThreadPool(max_workers=multiprocessing.cpu_count(), max_tasks=0, initializer=None, initargs=None) A ThreadPool allows to schedule jobs into a Pool of Threads which will perform them concurrently. Thread pools work as well as a *context manager*. *max_workers* is an integer representing the amount of desired process workers managed by the pool. If *max_tasks* is a number greater than zero, each worker will be restarted after performing an equal amount of tasks. *initializer* must be callable, if passed, it will be called every time a worker is started, receiving *initargs* as arguments. .. data:: active True if the Pool is running, false otherwise. .. function:: schedule(function, args=(), kwargs={}) Schedule a job within the Pool. Returns a concurrent.futures.Future_ object representing the execution of the callable. *function* is the function which is about to be scheduled. *args* and *kwargs* will be passed to the function respectively as its arguments and keyword arguments. .. function:: map(function, *iterables, chunksize=1) Concurrently compute the *function* using arguments from each of the iterables. Stop when the shortest iterable is exhausted. *chunksize* controls the size of the chunks the iterables will be broken into before being passed to the function. *timeout* is an integer or a float. If given, it will be assigned to every chunk of the iterables. If the computation of the given chunk will last longer than *timeout*, iterating over its result will raise *TimeoutError*. A pebble.MapFuture_ object is returned. Its *result* method will return an iterable containing the results of the computation in the same order as they were given. .. function:: close() No more job will be allowed into the Pool, queued jobs will be consumed. To ensure all the jobs are performed call *ThreadPool.join()* just after closing the Pool. .. function:: stop() The ongoing jobs will be performed, all the enqueued ones dropped; this is a fast way to terminate the Pool. To ensure the Pool to be released call *ThreadPool.join()* after stopping the Pool. .. function:: join(timeout=None) Waits for all workers to exit, must not be called before calling either *close()* or *stop()*. If *timeout* is set and some worker is still running after it expired, a TimeoutError will be raised. The *join* function must be called only in the main loop. Calling it in a pebble.ProcessFuture_ callback will result in a deadlock. .. decorator:: pebble.synchronized([lock]) A synchronized function prevents two or more callers to interleave its execution preventing race conditions. The *synchronized* decorator accepts as optional parameter a *Lock*, *RLock* or *Semaphore* from *threading* and *multiprocessing* modules. If no synchronization object is given, a *threading.Lock* will be employed. This implies that between different decorated functions only one at a time will be executed. .. decorator:: pebble.sighandler(signals) Convenience decorator for setting the decorated *function* as signal handler for the specified *signals*. *signals* can either be a single signal or a list/tuple of signals. .. function:: pebble.waitforthreads(threads, timeout=None) Waits for one or more *Thread* to exit or until *timeout* expires. *threads* is a list containing one or more *threading.Thread* objects. If *timeout* is not None the function will block for the specified amount of seconds returning an empty list if no *Thread* is ready. The function returns a list containing the ready *Threads*. .. note:: Expired *Threads* are not joined by *waitforthreads*. .. function:: pebble.waitforqueues(queues, timeout=None) Waits for one or more *Queue* to be ready or until *timeout* expires. *queues* is a list containing one or more *Queue.Queue* objects. If *timeout* is not None the function will block for the specified amount of seconds returning an empty list if no *Queue* is ready. The function returns a list containing the ready *Queues*. .. _pebble.ProcessFuture: .. class:: pebble.ProcessFuture() This class inherits from concurrent.futures.Future_. The sole difference with the parent class is the possibility to cancel running calls. .. function:: cancel() Cancel a running or enqueued call. If the call has already completed then the method will return False, otherwise the call will be cancelled and the method will return True. If the call is running, the process executing it will be stopped allowing to reclaim its resources. .. _pebble.MapFuture: .. class:: pebble.MapFuture() This class inherits from concurrent.futures.Future_. It is returned by the *map* function of a *ThreadPool*. .. function:: result() Returns an iterator over the results of the *map* function. If a call raises an exception, then that exception will be raised when its value is retrieved from the iterator. The returned iterator raises a concurrent.futures.TimeoutError if __next__() is called and the result isn’t available after timeout seconds from the original call to Pool.map(). .. function:: cancel() Cancel the computation of enqueued element of the iterables passed to the *map* function. If all the elements are already in progress or completed then the method will return False. True is returned otherwise. .. _pebble.ProcessMapFuture: .. class:: pebble.ProcessMapFuture() This class inherits from pebble.ProcessFuture_. It is returned by the *map* function of a *ProcessPool*. .. function:: result() Returns an iterator over the results of the *map* function. If a call raises an exception, then that exception will be raised when its value is retrieved from the iterator. The returned iterator raises a concurrent.futures.TimeoutError if __next__() is called and the result isn’t available after timeout seconds from the original call to Pool.map(). .. function:: cancel() Cancel the computation of running or enqueued element of the iterables passed to the *map* function. If all the elements are already completed then the method will return False. True is returned otherwise. .. exception:: pebble.ProcessExpired Raised by *Future.result()* functions if the related process died unexpectedly during the execution. .. data:: exitcode Integer representing the process' exit code. General notes ------------- Processes +++++++++ The Python's multiprocessing guidelines apply as well for all functionalities within the *process* namespace. Examples -------- Concurrent decorators +++++++++++++++++++++ Run a function in a separate process and wait for its results. :: from pebble import concurrent @concurrent.process def function(arg, kwarg=0): return arg + kwarg future = function(1, kwarg=1) print(future.result()) Quite often developers need to integrate in their projects third party code which appears to be unstable, to leak memory or to hang. The concurrent function allows to easily take advantage of the isolation offered by processes without the need of handling any multiprocessing primitive. :: from pebble import concurrent from concurrent.futures import TimeoutError from third_party_lib import unstable_function @concurrent.process(timeout=10) def function(arg, kwarg=0): unstable_function(arg, kwarg=kwarg) future = function(1, kwarg=1) try: results = future.result() except TimeoutError as error: print("unstable_function took longer than %d seconds" % error.args[1]) except ProcessExpired as error: print("%s. Exit code: %d" % (error, error.exitcode)) except Exception as error: print("unstable_function raised %s" % error) print(error.traceback) # Python's traceback of remote process Pools +++++ The *ProcessPool* has been designed to support task timeouts and critical errors. If a task reaches its timeout, the worker will be interrupted immediately. Abrupt interruptions of the workers are dealt trasparently. The *map* function returns a *Future* object to better control its execution. When the first result is ready, the *result* function will return an iterator. The iterator can be used to retrieve the results no matter their outcome. :: from concurrent.futures import TimeoutError from pebble import ProcessPool, ProcessExpired def function(n): return n with ProcessPool() as pool: future = pool.map(function, range(100), timeout=10) iterator = future.result() while True: try: result = next(iterator) except StopIteration: break except TimeoutError as error: print("function took longer than %d seconds" % error.args[1]) except ProcessExpired as error: print("%s. Exit code: %d" % (error, error.exitcode)) except Exception as error: print("function raised %s" % error) print(error.traceback) # Python's traceback of remote process The following example shows how to compute the Fibonacci sequence up to a certain duration after which, all the remaining computations will be cancelled as they would timeout anyway. :: from pebble import ProcessPool from concurrent.futures import TimeoutError def fibonacci(n): if n == 0: return 0 elif n == 1: return 1 else: return fibonacci(n - 1) + fibonacci(n - 2) with ProcessPool() as pool: future = pool.map(fibonacci, range(50), timeout=10) try: for n in future.result(): print(n) except TimeoutError: print("TimeoutError: aborting remaining computations") future.cancel() To compute large collections of elements without incurring in IPC performance limitations, it is possible to use the *chunksize* parameter of the *map* function. :: from pebble import ProcessPool from multiprocessing import cpu_count from concurrent.futures import TimeoutError def function(n): return n elements = list(range(1000000)) cpus = cpu_count() size = len(elements) chunksize = size / cpus # the timeout will be assigned to each chunk # therefore, we need to consider its size timeout = 10 * chunksize with ProcessPool(max_workers=cpus) as pool: future = pool.map(function, elements, chunksize=chunksize, timeout=timeout) assert list(future.result()) == elements Control process resources usage ******************************* By combining the *resource* module and the *ProcessPool* initializer function, it is possible to control the amount of resources each process can consume. In the following example, the memory consumption of each worker process is limited to 1 Kb. :: import resource from pebble import ProcessPool MAX_MEM = 1024 def initializer(limit): """Set maximum amount of memory each worker process can allocate.""" soft, hard = resource.getrlimit(resource.RLIMIT_AS) resource.setrlimit(resource.RLIMIT_AS, (limit, hard)) def function(): """This function tries to allocate 1Mb worth of string.""" string = '' for _ in range(1024): string += 1024 * 'A' pool = ProcessPool(initializer=initializer, initargs=(MAX_MEM,)) future = pool.schedule(function) assert isinstance(future.exception(), MemoryError) Sighandler decorator ++++++++++++++++++++ The syntax :: import signal from pebble import sighandler @sighandler((signal.SIGINT, signal.SIGTERM)) def signal_handler(signum, frame): print("Termination request received!") Is equivalent to :: import signal def signal_handler(signum, frame): print("Termination request received!") signal.signal(signal.SIGINT, signal_handler) signal.signal(signal.SIGTERM, signal_handler) Running the tests ----------------- On Python 3, the tests will cover all the multiprocessing starting methods supported by the platform. Due to multiprocessing limitations, it is not possible to change the starting method once set. Therefore, test frameworks such as nose and pytest which run all the tests in a single process will fail. Please refer to the `test/run-test.sh` bash script to see how to run the tests. .. _concurrent.futures.Future: https://docs.python.org/3/library/concurrent.futures.html#future-objects .. toctree:: :maxdepth: 2 pebble-4.5.3/pebble/000077500000000000000000000000001365601427000142115ustar00rootroot00000000000000pebble-4.5.3/pebble/__init__.py000066400000000000000000000011241365601427000163200ustar00rootroot00000000000000__author__ = 'Matteo Cafasso' __version__ = '4.5.3' __license__ = 'LGPL' __all__ = ['waitforthreads', 'waitforqueues', 'synchronized', 'sighandler', 'ProcessFuture', 'MapFuture', 'ProcessMapFuture', 'ProcessExpired', 'ProcessPool', 'ThreadPool'] from pebble.decorators import synchronized, sighandler from pebble.common import ProcessExpired, ProcessFuture from pebble.functions import waitforqueues, waitforthreads from pebble.pool import ThreadPool, ProcessPool, MapFuture, ProcessMapFuture pebble-4.5.3/pebble/common.py000066400000000000000000000131011365601427000160470ustar00rootroot00000000000000# This file is part of Pebble. # Copyright (c) 2013-2020, Matteo Cafasso # Pebble is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License # as published by the Free Software Foundation, # either version 3 of the License, or (at your option) any later version. # Pebble is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public License # along with Pebble. If not, see . from __future__ import absolute_import import os import pickle import signal from threading import Thread from traceback import format_exc from multiprocessing import Process from concurrent.futures import Future class ProcessExpired(OSError): """Raised when process dies unexpectedly.""" def __init__(self, msg, code=0): super(ProcessExpired, self).__init__(msg) self.exitcode = code class PebbleFuture(Future): # Same as base class, removed logline def set_running_or_notify_cancel(self): """Mark the future as running or process any cancel notifications. Should only be used by Executor implementations and unit tests. If the future has been cancelled (cancel() was called and returned True) then any threads waiting on the future completing (though calls to as_completed() or wait()) are notified and False is returned. If the future was not cancelled then it is put in the running state (future calls to running() will return True) and True is returned. This method should be called by Executor implementations before executing the work associated with this future. If this method returns False then the work should not be executed. Returns: False if the Future was cancelled, True otherwise. Raises: RuntimeError: if set_result() or set_exception() was called. """ with self._condition: if self._state == CANCELLED: self._state = CANCELLED_AND_NOTIFIED for waiter in self._waiters: waiter.add_cancelled(self) return False elif self._state == PENDING: self._state = RUNNING return True else: raise RuntimeError('Future in unexpected state') class ProcessFuture(PebbleFuture): def cancel(self): """Cancel the future. Returns True if the future was cancelled, False otherwise. A future cannot be cancelled if it has already completed. """ with self._condition: if self._state == FINISHED: return False if self._state in (CANCELLED, CANCELLED_AND_NOTIFIED): return True self._state = CANCELLED self._condition.notify_all() self._invoke_callbacks() return True class RemoteTraceback(Exception): """Traceback wrapper for exceptions in remote process. Exception.__cause__ requires a BaseException subclass. """ def __init__(self, traceback): self.traceback = traceback def __str__(self): return self.traceback class RemoteException(object): """Pickling wrapper for exceptions in remote process.""" def __init__(self, exception, traceback): self.exception = exception self.traceback = traceback def __reduce__(self): return rebuild_exception, (self.exception, self.traceback) def rebuild_exception(exception, traceback): exception.__cause__ = RemoteTraceback(traceback) return exception def launch_thread(name, function, daemon, *args, **kwargs): thread = Thread(target=function, name=name, args=args, kwargs=kwargs) thread.daemon = daemon thread.start() return thread def launch_process(name, function, daemon, *args, **kwargs): process = Process(target=function, name=name, args=args, kwargs=kwargs) process.daemon = daemon process.start() return process def stop_process(process): """Does its best to stop the process.""" process.terminate() process.join(3) if process.is_alive() and os.name != 'nt': try: os.kill(process.pid, signal.SIGKILL) process.join() except OSError: return if process.is_alive(): raise RuntimeError("Unable to terminate PID %d" % os.getpid()) def execute(function, *args, **kwargs): """Runs the given function returning its results or exception.""" try: return function(*args, **kwargs) except Exception as error: error.traceback = format_exc() return error def process_execute(function, *args, **kwargs): """Runs the given function returning its results or exception.""" try: return function(*args, **kwargs) except Exception as error: error.traceback = format_exc() return RemoteException(error, error.traceback) def send_result(pipe, data): """Send result handling pickling and communication errors.""" try: pipe.send(data) except (pickle.PicklingError, TypeError) as error: error.traceback = format_exc() pipe.send(RemoteException(error, error.traceback)) SLEEP_UNIT = 0.1 # Borrowed from concurrent.futures PENDING = 'PENDING' RUNNING = 'RUNNING' FINISHED = 'FINISHED' CANCELLED = 'CANCELLED' CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED' pebble-4.5.3/pebble/concurrent/000077500000000000000000000000001365601427000163735ustar00rootroot00000000000000pebble-4.5.3/pebble/concurrent/__init__.py000066400000000000000000000002061365601427000205020ustar00rootroot00000000000000__all__ = ['thread', 'process'] from pebble.concurrent.thread import thread from pebble.concurrent.process import process pebble-4.5.3/pebble/concurrent/process.py000066400000000000000000000132151365601427000204250ustar00rootroot00000000000000# This file is part of Pebble. # Copyright (c) 2013-2020, Matteo Cafasso # Pebble is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License # as published by the Free Software Foundation, # either version 3 of the License, or (at your option) any later version. # Pebble is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public License # along with Pebble. If not, see . import os import sys import signal from itertools import count from functools import wraps from multiprocessing import Pipe from concurrent.futures import CancelledError, TimeoutError try: from multiprocessing import get_start_method except ImportError: def get_start_method(): return 'spawn' if os.name == 'nt' else 'fork' from pebble.common import ProcessExpired, ProcessFuture from pebble.common import launch_process, stop_process, SLEEP_UNIT from pebble.common import process_execute, launch_thread, send_result def process(*args, **kwargs): """Runs the decorated function in a concurrent process, taking care of the result and error management. Decorated functions will return a concurrent.futures.Future object once called. The timeout parameter will set a maximum execution time for the decorated function. If the execution exceeds the timeout, the process will be stopped and the Future will raise TimeoutError. The name parameter will set the process name. """ timeout = kwargs.get('timeout') name = kwargs.get('name') daemon = kwargs.get('daemon', True) # decorator without parameters if len(args) == 1 and len(kwargs) == 0 and callable(args[0]): return _process_wrapper(args[0], timeout, name, daemon) else: # decorator with parameters if timeout is not None and not isinstance(timeout, (int, float)): raise TypeError('Timeout expected to be None or integer or float') if name is not None and not isinstance(name, str): raise TypeError('Name expected to be None or string') if daemon is not None and not isinstance(daemon, bool): raise TypeError('Daemon expected to be None or bool') def decorating_function(function): return _process_wrapper(function, timeout, name, daemon) return decorating_function def _process_wrapper(function, timeout, name, daemon): _register_function(function) @wraps(function) def wrapper(*args, **kwargs): future = ProcessFuture() reader, writer = Pipe(duplex=False) if get_start_method() != 'fork': target = _trampoline args = [function.__name__, function.__module__] + list(args) else: target = function worker = launch_process( name, _function_handler, daemon, target, args, kwargs, writer) writer.close() future.set_running_or_notify_cancel() launch_thread(name, _worker_handler, True, future, worker, reader, timeout) return future return wrapper def _worker_handler(future, worker, pipe, timeout): """Worker lifecycle manager. Waits for the worker to be perform its task, collects result, runs the callback and cleans up the process. """ result = _get_result(future, pipe, timeout) if isinstance(result, BaseException): if isinstance(result, ProcessExpired): result.exitcode = worker.exitcode future.set_exception(result) else: future.set_result(result) if worker.is_alive(): stop_process(worker) def _function_handler(function, args, kwargs, pipe): """Runs the actual function in separate process and returns its result.""" signal.signal(signal.SIGINT, signal.SIG_IGN) result = process_execute(function, *args, **kwargs) send_result(pipe, result) def _get_result(future, pipe, timeout): """Waits for result and handles communication errors.""" counter = count(step=SLEEP_UNIT) try: while not pipe.poll(SLEEP_UNIT): if timeout is not None and next(counter) >= timeout: return TimeoutError('Task Timeout', timeout) elif future.cancelled(): return CancelledError() return pipe.recv() except (EOFError, OSError): return ProcessExpired('Abnormal termination') except Exception as error: return error ################################################################################ # Spawn process start method handling logic ################################################################################ _registered_functions = {} def _register_function(function): global _registered_functions _registered_functions[function.__name__] = function def _trampoline(name, module, *args, **kwargs): """Trampoline function for decorators. Lookups the function between the registered ones; if not found, forces its registering and then executes it. """ function = _function_lookup(name, module) return function(*args, **kwargs) def _function_lookup(name, module): """Searches the function between the registered ones. If not found, it imports the module forcing its registration. """ try: return _registered_functions[name] except KeyError: # force function registering __import__(module) mod = sys.modules[module] getattr(mod, name) return _registered_functions[name] pebble-4.5.3/pebble/concurrent/thread.py000066400000000000000000000045621365601427000202230ustar00rootroot00000000000000# This file is part of Pebble. # Copyright (c) 2013-2020, Matteo Cafasso # Pebble is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License # as published by the Free Software Foundation, # either version 3 of the License, or (at your option) any later version. # Pebble is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public License # along with Pebble. If not, see . from functools import wraps from traceback import format_exc from concurrent.futures import Future from pebble.common import launch_thread def thread(*args, **kwargs): """Runs the decorated function within a concurrent thread, taking care of the result and error management. Decorated functions will return a concurrent.futures.Future object once called. The name parameter will set the process name. """ name = kwargs.get('name') daemon = kwargs.get('daemon', True) if len(args) == 1 and len(kwargs) == 0 and callable(args[0]): return _thread_wrapper(args[0], name, daemon) else: # decorator with parameters if name is not None and not isinstance(name, str): raise TypeError('Name expected to be None or string') if daemon is not None and not isinstance(daemon, bool): raise TypeError('Daemon expected to be None or bool') def decorating_function(function): return _thread_wrapper(function, name, daemon) return decorating_function def _thread_wrapper(function, name, daemon): @wraps(function) def wrapper(*args, **kwargs): future = Future() launch_thread(name, _function_handler, daemon, function, args, kwargs, future) return future return wrapper def _function_handler(function, args, kwargs, future): """Runs the actual function in separate thread and returns its result.""" future.set_running_or_notify_cancel() try: result = function(*args, **kwargs) except BaseException as error: error.traceback = format_exc() future.set_exception(error) else: future.set_result(result) pebble-4.5.3/pebble/decorators.py000066400000000000000000000044321365601427000167330ustar00rootroot00000000000000# This file is part of Pebble. # Copyright (c) 2013-2020, Matteo Cafasso # Pebble is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License # as published by the Free Software Foundation, # either version 3 of the License, or (at your option) any later version. # Pebble is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public License # along with Pebble. If not, see . import signal import threading from functools import wraps _synchronized_lock = threading.Lock() def synchronized(*args): """A synchronized function prevents two or more callers to interleave its execution preventing race conditions. The synchronized decorator accepts as optional parameter a Lock, RLock or Semaphore object which will be employed to ensure the function's atomicity. If no synchronization object is given, a single threading.Lock will be used. This implies that between different decorated function only one at a time will be executed. """ if callable(args[0]): return decorate_synchronized(args[0], _synchronized_lock) else: def wrap(function): return decorate_synchronized(function, args[0]) return wrap def decorate_synchronized(function, lock): @wraps(function) def wrapper(*args, **kwargs): with lock: return function(*args, **kwargs) return wrapper def sighandler(signals): """Sets the decorated function as signal handler of given *signals*. *signals* can be either a single signal or a list/tuple of multiple ones. """ def wrap(function): set_signal_handlers(signals, function) @wraps(function) def wrapper(*args, **kwargs): return function(*args, **kwargs) return wrapper return wrap def set_signal_handlers(signals, function): if isinstance(signals, (list, tuple)): for signum in signals: signal.signal(signum, function) else: signal.signal(signals, function) pebble-4.5.3/pebble/functions.py000066400000000000000000000101021365601427000165650ustar00rootroot00000000000000# This file is part of Pebble. # Copyright (c) 2013-2020, Matteo Cafasso # Pebble is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License # as published by the Free Software Foundation, # either version 3 of the License, or (at your option) any later version. # Pebble is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public License # along with Pebble. If not, see . import threading from time import time from types import MethodType _waitforthreads_lock = threading.Lock() def waitforqueues(queues, timeout=None): """Waits for one or more *Queue* to be ready or until *timeout* expires. *queues* is a list containing one or more *Queue.Queue* objects. If *timeout* is not None the function will block for the specified amount of seconds. The function returns a list containing the ready *Queues*. """ lock = threading.Condition(threading.Lock()) prepare_queues(queues, lock) try: wait_queues(queues, lock, timeout) finally: reset_queues(queues) return filter(lambda q: not q.empty(), queues) def prepare_queues(queues, lock): """Replaces queue._put() method in order to notify the waiting Condition.""" for queue in queues: queue._pebble_lock = lock with queue.mutex: queue._pebble_old_method = queue._put queue._put = MethodType(new_method, queue) def wait_queues(queues, lock, timeout): with lock: if not any(map(lambda q: not q.empty(), queues)): lock.wait(timeout) def reset_queues(queues): """Resets original queue._put() method.""" for queue in queues: with queue.mutex: queue._put = queue._pebble_old_method delattr(queue, '_pebble_old_method') delattr(queue, '_pebble_lock') def waitforthreads(threads, timeout=None): """Waits for one or more *Thread* to exit or until *timeout* expires. .. note:: Expired *Threads* are not joined by *waitforthreads*. *threads* is a list containing one or more *threading.Thread* objects. If *timeout* is not None the function will block for the specified amount of seconds. The function returns a list containing the ready *Threads*. """ old_function = None lock = threading.Condition(threading.Lock()) def new_function(*args): old_function(*args) with lock: lock.notify_all() old_function = prepare_threads(new_function) try: wait_threads(threads, lock, timeout) finally: reset_threads(old_function) return filter(lambda t: not t.is_alive(), threads) def prepare_threads(new_function): """Replaces threading._get_ident() function in order to notify the waiting Condition.""" with _waitforthreads_lock: if hasattr(threading, 'get_ident'): old_function = threading.get_ident threading.get_ident = new_function else: old_function = threading._get_ident threading._get_ident = new_function return old_function def wait_threads(threads, lock, timeout): timestamp = time() with lock: while not any(map(lambda t: not t.is_alive(), threads)): if timeout is None: lock.wait() elif timeout - (time() - timestamp) > 0: lock.wait(timeout - (time() - timestamp)) else: return def reset_threads(old_function): """Resets original threading._get_ident() function.""" with _waitforthreads_lock: if hasattr(threading, 'get_ident'): threading.get_ident = old_function else: threading._get_ident = old_function def new_method(self, *args): self._pebble_old_method(*args) with self._pebble_lock: self._pebble_lock.notify_all() pebble-4.5.3/pebble/pool/000077500000000000000000000000001365601427000151625ustar00rootroot00000000000000pebble-4.5.3/pebble/pool/__init__.py000066400000000000000000000004001365601427000172650ustar00rootroot00000000000000__all__ = ['ThreadPool', 'ProcessPool', 'MapFuture', 'ProcessMapFuture'] from pebble.pool.thread import ThreadPool from pebble.pool.process import ProcessPool from pebble.pool.base_pool import MapFuture, ProcessMapFuture pebble-4.5.3/pebble/pool/base_pool.py000066400000000000000000000154301365601427000175020ustar00rootroot00000000000000# This file is part of Pebble. # Copyright (c) 2013-2020, Matteo Cafasso # Pebble is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License # as published by the Free Software Foundation, # either version 3 of the License, or (at your option) any later version. # Pebble is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public License # along with Pebble. If not, see . import time import logging from threading import RLock from collections import namedtuple from itertools import chain, count, islice from concurrent.futures import TimeoutError try: from queue import Queue except ImportError: from Queue import Queue from pebble.common import PebbleFuture, ProcessFuture, SLEEP_UNIT class BasePool(object): def __init__(self, max_workers, max_tasks, initializer, initargs): self._context = PoolContext( max_workers, max_tasks, initializer, initargs) self._loops = () self._task_counter = count() def __enter__(self): return self def __exit__(self, *args): self.close() self.join() @property def active(self): self._update_pool_state() return self._context.state in (CLOSED, RUNNING) def close(self): """Closes the Pool preventing new tasks from being accepted. Pending tasks will be completed. """ self._context.state = CLOSED def stop(self): """Stops the pool without performing any pending task.""" self._context.state = STOPPED def join(self, timeout=None): """Joins the pool waiting until all workers exited. If *timeout* is set, it block until all workers are done or raises TimeoutError. """ if self._context.state == RUNNING: raise RuntimeError('The Pool is still running') if self._context.state == CLOSED: self._wait_queue_depletion(timeout) self.stop() self.join() else: self._context.task_queue.put(None) self._stop_pool() def _wait_queue_depletion(self, timeout): tick = time.time() while self.active: if timeout is not None and time.time() - tick > timeout: raise TimeoutError("Tasks are still being executed") elif self._context.task_queue.unfinished_tasks: time.sleep(SLEEP_UNIT) else: return def _check_pool_state(self): self._update_pool_state() if self._context.state == ERROR: raise RuntimeError('Unexpected error within the Pool') elif self._context.state != RUNNING: raise RuntimeError('The Pool is not active') def _update_pool_state(self): if self._context.state == CREATED: self._start_pool() for loop in self._loops: if not loop.is_alive(): self._context.state = ERROR def _start_pool(self): raise NotImplementedError("Not implemented") def _stop_pool(self): raise NotImplementedError("Not implemented") class PoolContext(object): def __init__(self, max_workers, max_tasks, initializer, initargs): self._state = CREATED self.state_mutex = RLock() self.task_queue = Queue() self.workers = max_workers self.task_counter = count() self.worker_parameters = Worker(max_tasks, initializer, initargs) @property def state(self): return self._state @state.setter def state(self, state): with self.state_mutex: if self.alive: self._state = state @property def alive(self): return self.state not in (ERROR, STOPPED) class Task: def __init__(self, identifier, future, timeout, payload): self.id = identifier self.future = future self.timeout = timeout self.payload = payload self.timestamp = 0 self.worker_id = 0 @property def started(self): return bool(self.timestamp > 0) def set_running_or_notify_cancel(self): if hasattr(self.future, 'map_future'): if not self.future.map_future.done(): try: self.future.map_future.set_running_or_notify_cancel() except RuntimeError: pass try: self.future.set_running_or_notify_cancel() except RuntimeError: pass class MapFuture(PebbleFuture): def __init__(self, futures): super(MapFuture, self).__init__() self._futures = futures def cancel(self): """Cancel the future. Returns True if any of the elements of the iterables is cancelled. False otherwise. """ super(MapFuture, self).cancel() return any(tuple(f.cancel() for f in self._futures)) class ProcessMapFuture(ProcessFuture): def __init__(self, futures): super(ProcessMapFuture, self).__init__() self._futures = futures def cancel(self): """Cancel the future. Returns True if any of the elements of the iterables is cancelled. False otherwise. """ super(ProcessMapFuture, self).cancel() return any(tuple(f.cancel() for f in self._futures)) class MapResults: def __init__(self, futures, timeout=None): self._timeout = timeout self._results = chain.from_iterable(chunk_result(f) for f in futures) def __iter__(self): return self def next(self): result = next(self._results) if isinstance(result, Exception): raise result return result __next__ = next def iter_chunks(chunksize, *iterables): """Iterates over zipped iterables in chunks.""" iterables = iter(zip(*iterables)) while 1: chunk = tuple(islice(iterables, chunksize)) if not chunk: return yield chunk def chunk_result(future): """Returns the results of a processed chunk.""" try: return future.result() except Exception as error: return (error, ) def run_initializer(initializer, initargs): """Runs the Pool initializer dealing with errors.""" try: initializer(*initargs) return True except Exception as error: logging.exception(error) return False # Pool states CREATED = 0 RUNNING = 1 CLOSED = 2 STOPPED = 3 ERROR = 4 Worker = namedtuple('Worker', ('max_tasks', 'initializer', 'initargs')) TaskPayload = namedtuple('TaskPayload', ('function', 'args', 'kwargs')) pebble-4.5.3/pebble/pool/channel.py000066400000000000000000000117511365601427000171510ustar00rootroot00000000000000# This file is part of Pebble. # Copyright (c) 2013-2020, Matteo Cafasso # Pebble is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License # as published by the Free Software Foundation, # either version 3 of the License, or (at your option) any later version. # Pebble is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public License # along with Pebble. If not, see . import os import select from contextlib import contextmanager from multiprocessing import RLock, Pipe class ChannelError(OSError): """Error occurring within the process channel.""" pass def channels(): read0, write0 = Pipe(duplex=False) read1, write1 = Pipe(duplex=False) return Channel(read1, write0), WorkerChannel(read0, write1) class Channel(object): def __init__(self, reader, writer): self.reader = reader self.writer = writer self.poll = self._make_poll_method() def _make_poll_method(self): def unix_poll(timeout=None): poll = select.poll() poll.register(self.reader) try: return bool(poll.poll(timeout)) except OSError: raise except select.error as err: # Python 2 error = OSError(err.args[1]) error.errno = err.args[0] raise error def windows_poll(timeout=None): return self.reader.poll(timeout) return os.name != 'nt' and unix_poll or windows_poll def recv(self): return self.reader.recv() def send(self, obj): return self.writer.send(obj) def close(self): self.reader.close() self.writer.close() class WorkerChannel(Channel): def __init__(self, reader, writer): super(WorkerChannel, self).__init__(reader, writer) self.mutex = ChannelMutex() self.recv = self._make_recv_method() self.send = self._make_send_method() def __getstate__(self): return self.reader, self.writer, self.mutex def __setstate__(self, state): self.reader, self.writer, self.mutex = state self.poll = self._make_poll_method() self.recv = self._make_recv_method() self.send = self._make_send_method() def _make_recv_method(self): def recv(): with self.mutex.reader: return self.reader.recv() return recv def _make_send_method(self): def unix_send(obj): with self.mutex.writer: return self.writer.send(obj) def windows_send(obj): return self.writer.send(obj) return os.name != 'nt' and unix_send or windows_send @property @contextmanager def lock(self): with self.mutex: yield self class ChannelMutex: def __init__(self): self.reader_mutex = RLock() self.writer_mutex = os.name != 'nt' and RLock() or None self.acquire = self._make_acquire_method() self.release = self._make_release_method() def __getstate__(self): return self.reader_mutex, self.writer_mutex def __setstate__(self, state): self.reader_mutex, self.writer_mutex = state self.acquire = self._make_acquire_method() self.release = self._make_release_method() def __enter__(self): if self.acquire(): return self else: raise ChannelError("Channel mutex time out") def __exit__(self, *_): self.release() def _make_acquire_method(self): def unix_acquire(): return (self.reader_mutex.acquire(timeout=LOCK_TIMEOUT) and self.writer_mutex.acquire(timeout=LOCK_TIMEOUT)) def windows_acquire(): return self.reader_mutex.acquire(timeout=LOCK_TIMEOUT) return os.name != 'nt' and unix_acquire or windows_acquire def _make_release_method(self): def unix_release(): self.reader_mutex.release() self.writer_mutex.release() def windows_release(): self.reader_mutex.release() return os.name != 'nt' and unix_release or windows_release @property @contextmanager def reader(self): if self.reader_mutex.acquire(timeout=LOCK_TIMEOUT): try: yield self finally: self.reader_mutex.release() else: raise ChannelError("Channel mutex time out") @property @contextmanager def writer(self): if self.writer_mutex.acquire(timeout=LOCK_TIMEOUT): try: yield self finally: self.writer_mutex.release() else: raise ChannelError("Channel mutex time out") LOCK_TIMEOUT = 60 pebble-4.5.3/pebble/pool/process.py000066400000000000000000000364061365601427000172230ustar00rootroot00000000000000# This file is part of Pebble. # Copyright (c) 2013-2020, Matteo Cafasso # Pebble is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License # as published by the Free Software Foundation, # either version 3 of the License, or (at your option) any later version. # Pebble is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public License # along with Pebble. If not, see . import os import time import pickle from itertools import count from collections import namedtuple from multiprocessing import cpu_count from signal import SIG_IGN, SIGINT, signal from concurrent.futures import CancelledError, TimeoutError try: from concurrent.futures.process import BrokenProcessPool except ImportError: class BrokenProcessPool(OSError): pass from pebble.pool.channel import ChannelError, channels from pebble.pool.base_pool import BasePool, Task, TaskPayload from pebble.pool.base_pool import ProcessMapFuture, MapResults from pebble.pool.base_pool import iter_chunks, run_initializer from pebble.pool.base_pool import CREATED, ERROR, RUNNING, SLEEP_UNIT from pebble.common import launch_process, stop_process from pebble.common import ProcessExpired, ProcessFuture from pebble.common import process_execute, launch_thread class ProcessPool(BasePool): """Allows to schedule jobs within a Pool of Processes. max_workers is an integer representing the amount of desired process workers managed by the pool. If max_tasks is a number greater than zero, each worker will be restarted after performing an equal amount of tasks. initializer must be callable, if passed, it will be called every time a worker is started, receiving initargs as arguments. """ def __init__(self, max_workers=cpu_count(), max_tasks=0, initializer=None, initargs=()): super(ProcessPool, self).__init__( max_workers, max_tasks, initializer, initargs) self._pool_manager = PoolManager(self._context) def _start_pool(self): with self._context.state_mutex: if self._context.state == CREATED: self._pool_manager.start() self._loops = (launch_thread(None, task_scheduler_loop, True, self._pool_manager), launch_thread(None, pool_manager_loop, True, self._pool_manager), launch_thread(None, message_manager_loop, True, self._pool_manager)) self._context.state = RUNNING def _stop_pool(self): self._pool_manager.close() for loop in self._loops: loop.join() self._pool_manager.stop() def schedule(self, function, args=(), kwargs={}, timeout=None): """Schedules *function* to be run the Pool. *args* and *kwargs* will be forwareded to the scheduled function respectively as arguments and keyword arguments. *timeout* is an integer, if expires the task will be terminated and *Future.result()* will raise *TimeoutError*. A *pebble.ProcessFuture* object is returned. """ self._check_pool_state() future = ProcessFuture() payload = TaskPayload(function, args, kwargs) task = Task(next(self._task_counter), future, timeout, payload) self._context.task_queue.put(task) return future def map(self, function, *iterables, **kwargs): """Computes the *function* using arguments from each of the iterables. Stops when the shortest iterable is exhausted. *timeout* is an integer, if expires the task will be terminated and the call to next will raise *TimeoutError*. The *timeout* is applied to each chunk of the iterable. *chunksize* controls the size of the chunks the iterable will be broken into before being passed to the function. A *pebble.ProcessFuture* object is returned. """ self._check_pool_state() timeout = kwargs.get('timeout') chunksize = kwargs.get('chunksize', 1) if chunksize < 1: raise ValueError("chunksize must be >= 1") futures = [self.schedule( process_chunk, args=(function, chunk), timeout=timeout) for chunk in iter_chunks(chunksize, *iterables)] map_future = ProcessMapFuture(futures) if not futures: map_future.set_result(MapResults(futures)) return map_future def done_map(_): if not map_future.done(): map_future.set_result(MapResults(futures)) for future in futures: future.add_done_callback(done_map) setattr(future, 'map_future', map_future) return map_future def task_scheduler_loop(pool_manager): context = pool_manager.context task_queue = context.task_queue try: while context.alive: task = task_queue.get() if task is not None: if task.future.cancelled(): task.set_running_or_notify_cancel() task_queue.task_done() else: pool_manager.schedule(task) else: task_queue.task_done() except BrokenProcessPool: context.state = ERROR def pool_manager_loop(pool_manager): context = pool_manager.context try: while context.alive: pool_manager.update_status() time.sleep(SLEEP_UNIT) except BrokenProcessPool: context.state = ERROR def message_manager_loop(pool_manager): context = pool_manager.context try: while context.alive: pool_manager.process_next_message(SLEEP_UNIT) except BrokenProcessPool: context.state = ERROR class PoolManager: """Combines Task and Worker Managers providing a higher level one.""" def __init__(self, context): self.context = context self.task_manager = TaskManager(context.task_queue.task_done) self.worker_manager = WorkerManager(context.workers, context.worker_parameters) def start(self): self.worker_manager.create_workers() def close(self): self.worker_manager.close_channels() def stop(self): self.worker_manager.stop_workers() def schedule(self, task): """Schedules a new Task in the PoolManager.""" self.task_manager.register(task) try: self.worker_manager.dispatch(task) except (pickle.PicklingError, TypeError) as error: self.task_manager.task_problem(task.id, error) def process_next_message(self, timeout): """Processes the next message coming from the workers.""" message = self.worker_manager.receive(timeout) if isinstance(message, Acknowledgement): self.task_manager.task_start(message.task, message.worker) elif isinstance(message, Result): self.task_manager.task_done(message.task, message.result) elif isinstance(message, Problem): self.task_manager.task_problem(message.task, message.error) def update_status(self): self.update_tasks() self.update_workers() def update_tasks(self): """Handles timing out Tasks.""" for task in self.task_manager.timeout_tasks(): self.task_manager.task_done( task.id, TimeoutError("Task timeout", task.timeout)) self.worker_manager.stop_worker(task.worker_id) for task in self.task_manager.cancelled_tasks(): self.task_manager.task_done( task.id, CancelledError()) self.worker_manager.stop_worker(task.worker_id) def update_workers(self): """Handles unexpected processes termination.""" for expiration in self.worker_manager.inspect_workers(): self.handle_worker_expiration(expiration) self.worker_manager.create_workers() def handle_worker_expiration(self, expiration): worker_id, exitcode = expiration try: task = self.find_expired_task(worker_id) except LookupError: return else: error = ProcessExpired('Abnormal termination', code=exitcode) self.task_manager.task_done(task.id, error) def find_expired_task(self, worker_id): tasks = tuple(self.task_manager.tasks.values()) running_tasks = tuple(t for t in tasks if t.worker_id != 0) if running_tasks: return task_worker_lookup(running_tasks, worker_id) else: raise BrokenProcessPool("All workers expired") class TaskManager: """Manages the tasks flow within the Pool. Tasks are registered, acknowledged and completed. Timing out and cancelled tasks are handled as well. """ def __init__(self, task_done_callback): self.tasks = {} self.task_done_callback = task_done_callback def register(self, task): self.tasks[task.id] = task def task_start(self, task_id, worker_id): task = self.tasks[task_id] task.worker_id = worker_id task.timestamp = time.time() task.set_running_or_notify_cancel() def task_done(self, task_id, result): """Set the tasks result and run the callback.""" try: task = self.tasks.pop(task_id) except KeyError: return # result of previously timeout Task else: if task.future.cancelled(): task.set_running_or_notify_cancel() elif isinstance(result, BaseException): task.future.set_exception(result) else: task.future.set_result(result) self.task_done_callback() def task_problem(self, task_id, error): """Set the task with the error it caused within the Pool.""" self.task_start(task_id, None) self.task_done(task_id, error) def timeout_tasks(self): return tuple(t for t in tuple(self.tasks.values()) if self.timeout(t)) def cancelled_tasks(self): return tuple(t for t in tuple(self.tasks.values()) if t.timestamp != 0 and t.future.cancelled()) @staticmethod def timeout(task): if task.timeout and task.started: return time.time() - task.timestamp > task.timeout else: return False class WorkerManager: """Manages the workers related mechanics within the Pool. Maintains the workers active and encapsulates their communication logic. """ def __init__(self, workers, worker_parameters): self.workers = {} self.workers_number = workers self.worker_parameters = worker_parameters self.pool_channel, self.workers_channel = channels() def dispatch(self, task): try: self.pool_channel.send(WorkerTask(task.id, task.payload)) except (pickle.PicklingError, TypeError) as error: raise error except (OSError, EnvironmentError, TypeError) as error: raise BrokenProcessPool(error) def receive(self, timeout): try: if self.pool_channel.poll(timeout): return self.pool_channel.recv() else: return NoMessage() except (OSError, EnvironmentError, TypeError) as error: raise BrokenProcessPool(error) def inspect_workers(self): """Updates the workers status. Returns the workers which have unexpectedly ended. """ workers = tuple(self.workers.values()) expired = tuple(w for w in workers if not w.is_alive()) for worker in expired: self.workers.pop(worker.pid) return ((w.pid, w.exitcode) for w in expired if w.exitcode != 0) def create_workers(self): for _ in range(self.workers_number - len(self.workers)): self.new_worker() def close_channels(self): self.pool_channel.close() self.workers_channel.close() def stop_workers(self): for worker_id in tuple(self.workers.keys()): self.stop_worker(worker_id, force=True) def new_worker(self): try: worker = launch_process( None, worker_process, True, self.worker_parameters, self.workers_channel) self.workers[worker.pid] = worker except (OSError, EnvironmentError) as error: raise BrokenProcessPool(error) def stop_worker(self, worker_id, force=False): try: if force: stop_process(self.workers.pop(worker_id)) else: with self.workers_channel.lock: stop_process(self.workers.pop(worker_id)) except ChannelError as error: raise BrokenProcessPool(error) except KeyError: return # worker already expired def worker_process(params, channel): """The worker process routines.""" signal(SIGINT, SIG_IGN) if params.initializer is not None: if not run_initializer(params.initializer, params.initargs): os._exit(1) try: for task in worker_get_next_task(channel, params.max_tasks): payload = task.payload result = process_execute( payload.function, *payload.args, **payload.kwargs) send_result(channel, Result(task.id, result)) except (EnvironmentError, OSError, RuntimeError) as error: os._exit(error.errno if error.errno else 1) except EOFError: os._exit(0) def worker_get_next_task(channel, max_tasks): counter = count() while max_tasks == 0 or next(counter) < max_tasks: yield fetch_task(channel) def send_result(pipe, result): """Send result handling pickling and communication errors.""" try: pipe.send(result) except (pickle.PicklingError, TypeError) as error: pipe.send(Problem(result.task, error)) def fetch_task(channel): while channel.poll(): try: return task_transaction(channel) except RuntimeError: continue # another worker got the task def task_transaction(channel): """Ensures a task is fetched and acknowledged atomically.""" with channel.lock: if channel.poll(0): task = channel.recv() channel.send(Acknowledgement(os.getpid(), task.id)) else: raise RuntimeError("Race condition between workers") return task def task_worker_lookup(running_tasks, worker_id): for task in running_tasks: if task.worker_id == worker_id: return task raise LookupError("Not found") def process_chunk(function, chunk): """Processes a chunk of the iterable passed to map dealing with errors.""" return [process_execute(function, *args) for args in chunk] NoMessage = namedtuple('NoMessage', ()) Result = namedtuple('Result', ('task', 'result')) Problem = namedtuple('Problem', ('task', 'error')) WorkerTask = namedtuple('WorkerTask', ('id', 'payload')) Acknowledgement = namedtuple('Acknowledgement', ('worker', 'task')) pebble-4.5.3/pebble/pool/thread.py000066400000000000000000000144441365601427000170120ustar00rootroot00000000000000# This file is part of Pebble. # Copyright (c) 2013-2020, Matteo Cafasso # Pebble is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License # as published by the Free Software Foundation, # either version 3 of the License, or (at your option) any later version. # Pebble is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public License # along with Pebble. If not, see . import time from itertools import count from multiprocessing import cpu_count from concurrent.futures import Future from pebble.common import execute, launch_thread from pebble.pool.base_pool import MapFuture, MapResults from pebble.pool.base_pool import BasePool, Task, TaskPayload from pebble.pool.base_pool import iter_chunks, run_initializer from pebble.pool.base_pool import CREATED, ERROR, RUNNING, SLEEP_UNIT class ThreadPool(BasePool): """Allows to schedule jobs within a Pool of Threads. max_workers is an integer representing the amount of desired process workers managed by the pool. If max_tasks is a number greater than zero, each worker will be restarted after performing an equal amount of tasks. initializer must be callable, if passed, it will be called every time a worker is started, receiving initargs as arguments. """ def __init__(self, max_workers=cpu_count(), max_tasks=0, initializer=None, initargs=()): super(ThreadPool, self).__init__( max_workers, max_tasks, initializer, initargs) self._pool_manager = PoolManager(self._context) def _start_pool(self): with self._context.state_mutex: if self._context.state == CREATED: self._pool_manager.start() self._loops = (launch_thread(None, pool_manager_loop, True, self._pool_manager),) self._context.state = RUNNING def _stop_pool(self): for loop in self._loops: loop.join() self._pool_manager.stop() def schedule(self, function, args=(), kwargs={}): """Schedules *function* to be run the Pool. *args* and *kwargs* will be forwareded to the scheduled function respectively as arguments and keyword arguments. A *concurrent.futures.Future* object is returned. """ self._check_pool_state() future = Future() payload = TaskPayload(function, args, kwargs) task = Task(next(self._task_counter), future, None, payload) self._context.task_queue.put(task) return future def map(self, function, *iterables, **kwargs): """Returns an iterator equivalent to map(function, iterables). *chunksize* controls the size of the chunks the iterable will be broken into before being passed to the function. If None the size will be controlled by the Pool. """ self._check_pool_state() timeout = kwargs.get('timeout') chunksize = kwargs.get('chunksize', 1) if chunksize < 1: raise ValueError("chunksize must be >= 1") futures = [self.schedule(process_chunk, args=(function, chunk)) for chunk in iter_chunks(chunksize, *iterables)] map_future = MapFuture(futures) if not futures: map_future.set_result(MapResults(futures)) return map_future def done_map(_): if not map_future.done(): map_future.set_result(MapResults(futures, timeout=timeout)) for future in futures: future.add_done_callback(done_map) setattr(future, 'map_future', map_future) return map_future def pool_manager_loop(pool_manager): context = pool_manager.context while context.alive: pool_manager.update_status() time.sleep(SLEEP_UNIT) class PoolManager: def __init__(self, context): self.workers = [] self.context = context def start(self): self.create_workers() def stop(self): for worker in self.workers: self.context.task_queue.put(None) for worker in tuple(self.workers): self.join_worker(worker) def update_status(self): expired = self.inspect_workers() for worker in expired: self.join_worker(worker) self.create_workers() def inspect_workers(self): return tuple(w for w in self.workers if not w.is_alive()) def create_workers(self): for _ in range(self.context.workers - len(self.workers)): worker = launch_thread(None, worker_thread, True, self.context) self.workers.append(worker) def join_worker(self, worker): worker.join() self.workers.remove(worker) def worker_thread(context): """The worker thread routines.""" queue = context.task_queue parameters = context.worker_parameters if parameters.initializer is not None: if not run_initializer(parameters.initializer, parameters.initargs): context.state = ERROR return for task in get_next_task(context, parameters.max_tasks): execute_next_task(task) queue.task_done() def get_next_task(context, max_tasks): counter = count() queue = context.task_queue while context.alive and (max_tasks == 0 or next(counter) < max_tasks): task = queue.get() if task is not None: if task.future.cancelled(): task.set_running_or_notify_cancel() queue.task_done() else: yield task def execute_next_task(task): payload = task.payload task.timestamp = time.time() task.set_running_or_notify_cancel() result = execute(payload.function, *payload.args, **payload.kwargs) if isinstance(result, BaseException): task.future.set_exception(result) else: task.future.set_result(result) def process_chunk(function, chunk): """Processes a chunk of the iterable passed to map dealing with errors.""" return [execute(function, *args) for args in chunk] pebble-4.5.3/setup.cfg000066400000000000000000000000351365601427000145770ustar00rootroot00000000000000[bdist_wheel] universal = 1 pebble-4.5.3/setup.py000066400000000000000000000023431365601427000144740ustar00rootroot00000000000000import os import fileinput from setuptools import setup, find_packages CWD = os.path.dirname(__file__) def package_version(): module_path = os.path.join(CWD, 'pebble', '__init__.py') for line in fileinput.input(module_path): if line.startswith('__version__'): return line.split('=')[-1].strip().replace('\'', '') setup( name="Pebble", version=package_version(), author="Matteo Cafasso", author_email="noxdafox@gmail.com", description=("Threading and multiprocessing eye-candy."), license="LGPL", keywords="thread process pool decorator", url="https://github.com/noxdafox/pebble", packages=find_packages(exclude=["tests"]), extras_require={":python_version<'3'": ["futures"]}, long_description=open(os.path.join(CWD, 'README.rst')).read(), classifiers=[ "Programming Language :: Python", "Programming Language :: Python :: 3", "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "Operating System :: OS Independent", "Topic :: Software Development :: Libraries :: Python Modules", "License :: OSI Approved :: " + "GNU Library or Lesser General Public License (LGPL)" ], ) pebble-4.5.3/test/000077500000000000000000000000001365601427000137375ustar00rootroot00000000000000pebble-4.5.3/test/run-tests.sh000077500000000000000000000003411365601427000162400ustar00rootroot00000000000000#!/bin/bash # This script runs the tests singularly to overcome Python 3 # multiprocessing Process start methods limitations set -e for testfile in $(find test/ -name "test_*.py") do python -m pytest $testfile -v done pebble-4.5.3/test/test_concurrent_process_fork.py000066400000000000000000000167561365601427000223300ustar00rootroot00000000000000import os import sys import time import pickle import signal import unittest import threading import multiprocessing from concurrent.futures import CancelledError, TimeoutError from pebble import concurrent, ProcessExpired # set start method supported = False if sys.version_info.major > 2 and sys.version_info.minor > 3: methods = multiprocessing.get_all_start_methods() if 'fork' in methods: try: multiprocessing.set_start_method('fork') if multiprocessing.get_start_method() == 'fork': supported = True except RuntimeError: # child process pass else: supported = True @concurrent.process def decorated(argument, keyword_argument=0): """A docstring.""" return argument + keyword_argument @concurrent.process def error_decorated(): raise RuntimeError("BOOM!") @concurrent.process def pickling_error_decorated(): event = threading.Event() return event @concurrent.process def critical_decorated(): os._exit(123) @concurrent.process def decorated_cancel(): time.sleep(10) @concurrent.process(timeout=0.1) def long_decorated(): time.sleep(10) @concurrent.process(timeout=0.1) def sigterm_decorated(): signal.signal(signal.SIGTERM, signal.SIG_IGN) time.sleep(10) @concurrent.process() def name_keyword_argument(name='function_kwarg'): return name @concurrent.process(name='concurrent_process_name') def name_keyword_decorated(): return multiprocessing.current_process().name @concurrent.process(name='decorator_kwarg') def name_keyword_decorated_and_argument(name='bar'): return (multiprocessing.current_process().name, name) @concurrent.process(daemon=False) def daemon_keyword_decorated(): return multiprocessing.current_process().daemon class ProcessConcurrentObj: a = 0 def __init__(self): self.b = 1 @classmethod @concurrent.process def clsmethod(cls): return cls.a @concurrent.process def instmethod(self): return self.b @staticmethod @concurrent.process def stcmethod(): return 2 @unittest.skipIf(not supported, "Start method is not supported") class TestProcessConcurrent(unittest.TestCase): def setUp(self): self.results = 0 self.exception = None self.event = threading.Event() self.event.clear() self.concurrentobj = ProcessConcurrentObj() def callback(self, future): try: self.results = future.result() except (ProcessExpired, RuntimeError, TimeoutError) as error: self.exception = error finally: self.event.set() def test_docstring(self): """Process Fork docstring is preserved.""" self.assertEqual(decorated.__doc__, "A docstring.") def test_wrong_timeout(self): """Process Fork TypeError is raised if timeout is not number.""" with self.assertRaises(TypeError): @concurrent.process(timeout='Foo') def function(): return def test_class_method(self): """Process Fork decorated classmethods.""" future = ProcessConcurrentObj.clsmethod() self.assertEqual(future.result(), 0) def test_instance_method(self): """Process Fork decorated instance methods.""" future = self.concurrentobj.instmethod() self.assertEqual(future.result(), 1) def test_static_method(self): """Process Fork decorated static methods (Fork startmethod only).""" future = self.concurrentobj.stcmethod() self.assertEqual(future.result(), 2) def test_decorated_results(self): """Process Fork results are produced.""" future = decorated(1, 1) self.assertEqual(future.result(), 2) def test_decorated_results_callback(self): """Process Fork results are forwarded to the callback.""" future = decorated(1, 1) future.add_done_callback(self.callback) self.event.wait(timeout=1) self.assertEqual(self.results, 2) def test_error_decorated(self): """Process Fork errors are raised by future.result.""" future = error_decorated() with self.assertRaises(RuntimeError): future.result() def test_error_decorated_callback(self): """Process Fork errors are forwarded to callback.""" future = error_decorated() future.add_done_callback(self.callback) self.event.wait(timeout=1) self.assertTrue(isinstance(self.exception, RuntimeError), msg=str(self.exception)) def test_pickling_error_decorated(self): """Process Fork pickling errors are raised by future.result.""" future = pickling_error_decorated() with self.assertRaises((pickle.PicklingError, TypeError)): future.result() def test_timeout_decorated(self): """Process Fork raises TimeoutError if so.""" future = long_decorated() with self.assertRaises(TimeoutError): future.result() def test_timeout_decorated_callback(self): """Process Fork TimeoutError is forwarded to callback.""" future = long_decorated() future.add_done_callback(self.callback) self.event.wait(timeout=1) self.assertTrue(isinstance(self.exception, TimeoutError), msg=str(self.exception)) def test_decorated_dead_process(self): """Process Fork ProcessExpired is raised if process dies.""" future = critical_decorated() with self.assertRaises(ProcessExpired): future.result() def test_timeout_decorated_callback(self): """Process Fork ProcessExpired is forwarded to callback.""" future = critical_decorated() future.add_done_callback(self.callback) self.event.wait(timeout=1) self.assertTrue(isinstance(self.exception, ProcessExpired), msg=str(self.exception)) def test_cancel_decorated(self): """Process Fork raises CancelledError if future was cancelled.""" future = decorated_cancel() future.cancel() self.assertRaises(CancelledError, future.result) @unittest.skipIf(os.name == 'nt', "Test won't run on Windows.") def test_decorated_ignoring_sigterm(self): """Process Fork Concurrent ignored SIGTERM signal are handled on Unix.""" future = sigterm_decorated() with self.assertRaises(TimeoutError): future.result() def test_name_keyword_argument(self): """name keyword can be passed to a decorated function process without name""" f = name_keyword_argument() fn_out = f.result() self.assertEqual(fn_out, "function_kwarg") def test_name_keyword_decorated(self): """ Check that a simple use case of the name keyword passed to the decorator works """ f = name_keyword_decorated() dec_out = f.result() self.assertEqual(dec_out, "concurrent_process_name") def test_name_keyword_decorated_result_colision(self): """name kwarg is handled without modifying the function kwargs""" f = name_keyword_decorated_and_argument(name="function_kwarg") dec_out, fn_out = f.result() self.assertEqual(dec_out, "decorator_kwarg") self.assertEqual(fn_out, "function_kwarg") def test_daemon_keyword_decorated(self): """Daemon keyword can be passed to a decorated function and spawns correctly.""" f = daemon_keyword_decorated() dec_out = f.result() self.assertEqual(dec_out, False) pebble-4.5.3/test/test_concurrent_process_forkserver.py000066400000000000000000000140751365601427000235470ustar00rootroot00000000000000import os import sys import time import pickle import signal import unittest import threading import multiprocessing from concurrent.futures import CancelledError, TimeoutError from pebble import concurrent, ProcessExpired # set start method supported = False if sys.version_info.major > 2 and sys.version_info.minor > 3: methods = multiprocessing.get_all_start_methods() if 'forkserver' in methods: try: multiprocessing.set_start_method('forkserver') if multiprocessing.get_start_method() == 'forkserver': supported = True except RuntimeError: # child process pass @concurrent.process def decorated(argument, keyword_argument=0): """A docstring.""" return argument + keyword_argument @concurrent.process def error_decorated(): raise RuntimeError("BOOM!") @concurrent.process def pickling_error_decorated(): event = threading.Event() return event @concurrent.process def critical_decorated(): os._exit(123) @concurrent.process def decorated_cancel(): time.sleep(10) @concurrent.process(timeout=0.1) def long_decorated(): time.sleep(10) @concurrent.process(timeout=0.1) def sigterm_decorated(): signal.signal(signal.SIGTERM, signal.SIG_IGN) time.sleep(10) @concurrent.process(daemon=False) def daemon_keyword_decorated(): return multiprocessing.current_process().daemon class ProcessConcurrentObj: a = 0 def __init__(self): self.b = 1 @classmethod @concurrent.process def clsmethod(cls): return cls.a @concurrent.process def instmethod(self): return self.b @unittest.skipIf(not supported, "Start method is not supported") class TestProcessConcurrent(unittest.TestCase): def setUp(self): self.results = 0 self.exception = None self.event = threading.Event() self.event.clear() self.concurrentobj = ProcessConcurrentObj() def callback(self, future): try: self.results = future.result() except (ProcessExpired, RuntimeError, TimeoutError) as error: self.exception = error finally: self.event.set() def test_docstring(self): """Process Forkserver docstring is preserved.""" self.assertEqual(decorated.__doc__, "A docstring.") def test_wrong_timeout(self): """Process Forkserver TypeError is raised if timeout is not number.""" with self.assertRaises(TypeError): @concurrent.process(timeout='Foo') def function(): return def test_class_method(self): """Process Forkserver decorated classmethods.""" future = ProcessConcurrentObj.clsmethod() self.assertEqual(future.result(), 0) def test_instance_method(self): """Process Forkserver decorated instance methods.""" future = self.concurrentobj.instmethod() self.assertEqual(future.result(), 1) def test_decorated_results(self): """Process Forkserver results are produced.""" future = decorated(1, 1) self.assertEqual(future.result(), 2) def test_decorated_results_callback(self): """Process Forkserver results are forwarded to the callback.""" future = decorated(1, 1) future.add_done_callback(self.callback) self.event.wait(timeout=1) self.assertEqual(self.results, 2) def test_error_decorated(self): """Process Forkserver errors are raised by future.result.""" future = error_decorated() with self.assertRaises(RuntimeError): future.result() def test_error_decorated_callback(self): """Process Forkserver errors are forwarded to callback.""" future = error_decorated() future.add_done_callback(self.callback) self.event.wait(timeout=1) self.assertTrue(isinstance(self.exception, RuntimeError), msg=str(self.exception)) def test_pickling_error_decorated(self): """Process Forkserver pickling errors are raised by future.result.""" future = pickling_error_decorated() with self.assertRaises((pickle.PicklingError, TypeError)): future.result() def test_timeout_decorated(self): """Process Forkserver raises TimeoutError if so.""" future = long_decorated() with self.assertRaises(TimeoutError): future.result() def test_timeout_decorated_callback(self): """Process Forkserver TimeoutError is forwarded to callback.""" future = long_decorated() future.add_done_callback(self.callback) self.event.wait(timeout=1) self.assertTrue(isinstance(self.exception, TimeoutError), msg=str(self.exception)) def test_decorated_dead_process(self): """Process Forkserver ProcessExpired is raised if process dies.""" future = critical_decorated() with self.assertRaises(ProcessExpired): future.result() def test_timeout_decorated_callback(self): """Process Forkserver ProcessExpired is forwarded to callback.""" future = critical_decorated() future.add_done_callback(self.callback) self.event.wait(timeout=1) self.assertTrue(isinstance(self.exception, ProcessExpired), msg=str(self.exception)) def test_cancel_decorated(self): """Process Forkserver raises CancelledError if future was cancelled.""" future = decorated_cancel() future.cancel() self.assertRaises(CancelledError, future.result) @unittest.skipIf(os.name == 'nt', "Test won't run on Windows.") def test_decorated_ignoring_sigterm(self): """Process Forkserver Concurrent ignored SIGTERM signal are handled on Unix.""" future = sigterm_decorated() with self.assertRaises(TimeoutError): future.result() def test_daemon_keyword_decorated(self): """Daemon keyword can be passed to a decorated function and spawns correctly.""" f = daemon_keyword_decorated() dec_out = f.result() self.assertEqual(dec_out, False) pebble-4.5.3/test/test_concurrent_process_spawn.py000066400000000000000000000137431365601427000225100ustar00rootroot00000000000000import os import sys import time import pickle import signal import unittest import threading import multiprocessing from concurrent.futures import CancelledError, TimeoutError from pebble import concurrent, ProcessExpired # set start method supported = False if sys.version_info.major > 2 and sys.version_info.minor > 3: methods = multiprocessing.get_all_start_methods() if 'spawn' in methods: try: multiprocessing.set_start_method('spawn') if multiprocessing.get_start_method() == 'spawn': supported = True except RuntimeError: # child process pass @concurrent.process def decorated(argument, keyword_argument=0): """A docstring.""" return argument + keyword_argument @concurrent.process def error_decorated(): raise RuntimeError("BOOM!") @concurrent.process def pickling_error_decorated(): event = threading.Event() return event @concurrent.process def critical_decorated(): os._exit(123) @concurrent.process def decorated_cancel(): time.sleep(10) @concurrent.process(timeout=0.1) def long_decorated(): time.sleep(10) @concurrent.process(timeout=0.1) def sigterm_decorated(): signal.signal(signal.SIGTERM, signal.SIG_IGN) time.sleep(10) @concurrent.process(daemon=False) def daemon_keyword_decorated(): return multiprocessing.current_process().daemon class ProcessConcurrentObj: a = 0 def __init__(self): self.b = 1 @classmethod @concurrent.process def clsmethod(cls): return cls.a @concurrent.process def instmethod(self): return self.b @unittest.skipIf(not supported, "Start method is not supported") class TestProcessConcurrent(unittest.TestCase): def setUp(self): self.results = 0 self.exception = None self.event = threading.Event() self.event.clear() self.concurrentobj = ProcessConcurrentObj() def callback(self, future): try: self.results = future.result() except (ProcessExpired, RuntimeError, TimeoutError) as error: self.exception = error finally: self.event.set() def test_docstring(self): """Process Spawn docstring is preserved.""" self.assertEqual(decorated.__doc__, "A docstring.") def test_wrong_timeout(self): """Process Spawn TypeError is raised if timeout is not number.""" with self.assertRaises(TypeError): @concurrent.process(timeout='Foo') def function(): return def test_class_method(self): """Process Spawn decorated classmethods.""" future = ProcessConcurrentObj.clsmethod() self.assertEqual(future.result(), 0) def test_instance_method(self): """Process Spawn decorated instance methods.""" future = self.concurrentobj.instmethod() self.assertEqual(future.result(), 1) def test_decorated_results(self): """Process Spawn results are produced.""" future = decorated(1, 1) self.assertEqual(future.result(), 2) def test_decorated_results_callback(self): """Process Spawn results are forwarded to the callback.""" future = decorated(1, 1) future.add_done_callback(self.callback) self.event.wait(timeout=1) self.assertEqual(self.results, 2) def test_error_decorated(self): """Process Spawn errors are raised by future.result.""" future = error_decorated() with self.assertRaises(RuntimeError): future.result() def test_error_decorated_callback(self): """Process Spawn errors are forwarded to callback.""" future = error_decorated() future.add_done_callback(self.callback) self.event.wait(timeout=1) self.assertTrue(isinstance(self.exception, RuntimeError), msg=str(self.exception)) def test_pickling_error_decorated(self): """Process Spawn pickling errors are raised by future.result.""" future = pickling_error_decorated() with self.assertRaises((pickle.PicklingError, TypeError)): future.result() def test_timeout_decorated(self): """Process Spawn raises TimeoutError if so.""" future = long_decorated() with self.assertRaises(TimeoutError): future.result() def test_timeout_decorated_callback(self): """Process Spawn TimeoutError is forwarded to callback.""" future = long_decorated() future.add_done_callback(self.callback) self.event.wait(timeout=1) self.assertTrue(isinstance(self.exception, TimeoutError), msg=str(self.exception)) def test_decorated_dead_process(self): """Process Spawn ProcessExpired is raised if process dies.""" future = critical_decorated() with self.assertRaises(ProcessExpired): future.result() def test_timeout_decorated_callback(self): """Process Spawn ProcessExpired is forwarded to callback.""" future = critical_decorated() future.add_done_callback(self.callback) self.event.wait(timeout=1) self.assertTrue(isinstance(self.exception, ProcessExpired), msg=str(self.exception)) def test_cancel_decorated(self): """Process Spawn raises CancelledError if future was cancelled.""" future = decorated_cancel() future.cancel() self.assertRaises(CancelledError, future.result) @unittest.skipIf(os.name == 'nt', "Test won't run on Windows.") def test_decorated_ignoring_sigterm(self): """Process Spawn Concurrent ignored SIGTERM signal are handled on Unix.""" future = sigterm_decorated() with self.assertRaises(TimeoutError): future.result() def test_daemon_keyword_decorated(self): """Daemon keyword can be passed to a decorated function and spawns correctly.""" f = daemon_keyword_decorated() dec_out = f.result() self.assertEqual(dec_out, False) pebble-4.5.3/test/test_concurrent_thread.py000066400000000000000000000102671365601427000210670ustar00rootroot00000000000000import unittest import threading from pebble import concurrent @concurrent.thread def decorated(argument, keyword_argument=0): """A docstring.""" return argument + keyword_argument @concurrent.thread def error_decorated(): raise RuntimeError("BOOM!") @concurrent.thread() def name_keyword_argument(name='function_kwarg'): return name @concurrent.thread(name='concurrent_thread_name') def name_keyword_decorated(): return threading.current_thread().name @concurrent.thread(name='decorator_kwarg') def name_keyword_decorated_and_argument(name='bar'): return (threading.current_thread().name, name) @concurrent.thread(daemon=False) def daemon_keyword_decorated(): return threading.current_thread().daemon class ThreadConcurrentObj: a = 0 def __init__(self): self.b = 1 @classmethod @concurrent.thread def clsmethod(cls): return cls.a @concurrent.thread def instmethod(self): return self.b @staticmethod @concurrent.thread def stcmethod(): return 2 class TestThreadConcurrent(unittest.TestCase): def setUp(self): self.results = 0 self.exception = None self.event = threading.Event() self.event.clear() self.concurrentobj = ThreadConcurrentObj() def callback(self, future): try: self.results = future.result() except (RuntimeError) as error: self.exception = error finally: self.event.set() def test_docstring(self): """Thread docstring is preserved.""" self.assertEqual(decorated.__doc__, "A docstring.") def test_class_method(self): """Thread decorated classmethods.""" future = ThreadConcurrentObj.clsmethod() self.assertEqual(future.result(), 0) def test_instance_method(self): """Thread decorated instance methods.""" future = self.concurrentobj.instmethod() self.assertEqual(future.result(), 1) def test_static_method(self): """Thread decorated static methods ( startmethod only).""" future = self.concurrentobj.stcmethod() self.assertEqual(future.result(), 2) def test_decorated_results(self): """Thread results are produced.""" future = decorated(1, 1) self.assertEqual(future.result(), 2) def test_decorated_results_callback(self): """Thread results are forwarded to the callback.""" future = decorated(1, 1) future.add_done_callback(self.callback) self.event.wait(timeout=1) self.assertEqual(self.results, 2) def test_error_decorated(self): """Thread errors are raised by future.result.""" future = error_decorated() with self.assertRaises(RuntimeError): future.result() def test_error_decorated_callback(self): """Thread errors are forwarded to callback.""" future = error_decorated() future.add_done_callback(self.callback) self.event.wait(timeout=1) self.assertTrue(isinstance(self.exception, RuntimeError), msg=str(self.exception)) def test_name_keyword_argument(self): """name keyword can be passed to a decorated function process without name """ f = name_keyword_argument() fn_out = f.result() self.assertEqual(fn_out, "function_kwarg") def test_name_keyword_decorated(self): """ Check that a simple use case of the name keyword passed to the decorator works """ f = name_keyword_decorated() dec_out = f.result() self.assertEqual(dec_out, "concurrent_thread_name") def test_name_keyword_decorated_result(self): """name kwarg is handled without modifying the function kwargs""" f = name_keyword_decorated_and_argument(name="function_kwarg") dec_out, fn_out = f.result() self.assertEqual(dec_out, "decorator_kwarg") self.assertEqual(fn_out, "function_kwarg") def test_daemon_keyword_decorated(self): """Daemon keyword can be passed to a decorated function and spawns correctly.""" f = daemon_keyword_decorated() dec_out = f.result() self.assertEqual(dec_out, False) pebble-4.5.3/test/test_pebble.py000066400000000000000000000144651365601427000166130ustar00rootroot00000000000000import os import time import signal import unittest import threading try: # Python 2 from Queue import Queue except: # Python 3 from queue import Queue from pebble import decorators from pebble.common import launch_thread from pebble import synchronized, sighandler from pebble import waitforthreads, waitforqueues results = 0 semaphore = threading.Semaphore() @synchronized def synchronized_function(): """A docstring.""" return decorators._synchronized_lock.acquire(False) @synchronized(semaphore) def custom_synchronized_function(): """A docstring.""" return semaphore.acquire(False) try: from signal import SIGALRM, SIGFPE, SIGIO @sighandler(SIGALRM) def signal_handler(signum, frame): """A docstring.""" global results results = 1 @sighandler((SIGFPE, SIGIO)) def signals_handler(signum, frame): pass except ImportError: pass def thread_function(value): time.sleep(value) return value def queue_function(queues, index, value): time.sleep(value) queues[index].put(value) return value def spurious_wakeup_function(value, lock): value = value / 2 time.sleep(value) lock.acquire() time.sleep(value) return value class TestSynchronizedDecorator(unittest.TestCase): def test_wrapper_decorator_docstring(self): """Synchronized docstring of the original function is preserved.""" self.assertEqual(synchronized_function.__doc__, "A docstring.") def test_syncronized_locked(self): """Synchronized Lock is acquired during execution of decorated function.""" self.assertFalse(synchronized_function()) def test_syncronized_released(self): """Synchronized Lock is released during execution of decorated function.""" synchronized_function() self.assertTrue(decorators._synchronized_lock.acquire(False)) decorators._synchronized_lock.release() def test_custom_syncronized_locked(self): """Synchronized semaphore is acquired during execution of decorated function.""" self.assertFalse(custom_synchronized_function()) def test_custom_syncronized_released(self): """Synchronized semaphore is acquired during execution of decorated function.""" custom_synchronized_function() self.assertTrue(semaphore.acquire(False)) semaphore.release() class TestSigHandler(unittest.TestCase): def test_wrapper_decorator_docstring(self): """Sighandler docstring of the original function is preserved.""" if os.name != 'nt': self.assertEqual(signal_handler.__doc__, "A docstring.") def test_sighandler(self): """Sighandler installs SIGALRM.""" if os.name != 'nt': self.assertEqual(signal.getsignal(signal.SIGALRM).__name__, signal_handler.__name__) def test_sighandler_multiple(self): """Sighandler installs SIGFPE and SIGIO.""" if os.name != 'nt': self.assertEqual(signal.getsignal(signal.SIGFPE).__name__, signals_handler.__name__) self.assertEqual(signal.getsignal(signal.SIGIO).__name__, signals_handler.__name__) def test_sigalarm_sighandler(self): """Sighandler for SIGALARM works.""" if os.name != 'nt': os.kill(os.getpid(), signal.SIGALRM) time.sleep(0.1) self.assertEqual(results, 1) class TestWaitForThreads(unittest.TestCase): def test_waitforthreads_single(self): """Waitforthreads waits for a single thread.""" thread = launch_thread(None, thread_function, True, 0.01) self.assertEqual(list(waitforthreads([thread]))[0], thread) def test_waitforthreads_multiple(self): """Waitforthreads waits for multiple threads.""" threads = [] for _ in range(5): threads.append(launch_thread(None, thread_function, True, 0.01)) time.sleep(0.1) self.assertEqual(list(waitforthreads(threads)), threads) def test_waitforthreads_timeout(self): """Waitforthreads returns empty list if timeout.""" thread = launch_thread(None, thread_function, True, 0.1) self.assertEqual(list(waitforthreads([thread], timeout=0.01)), []) def test_waitforthreads_restore(self): """Waitforthreads get_ident is restored to original one.""" if hasattr(threading, 'get_ident'): expected = threading.get_ident else: expected = threading._get_ident thread = launch_thread(None, thread_function, True, 0) time.sleep(0.01) waitforthreads([thread]) if hasattr(threading, 'get_ident'): self.assertEqual(threading.get_ident, expected) else: self.assertEqual(threading._get_ident, expected) def test_waitforthreads_spurious(self): """Waitforthreads tolerates spurious wakeups.""" lock = threading.RLock() thread = launch_thread(None, spurious_wakeup_function, True, 0.1, lock) self.assertEqual(list(waitforthreads([thread])), [thread]) class TestWaitForQueues(unittest.TestCase): def setUp(self): self.queues = [Queue(), Queue(), Queue()] def test_waitforqueues_single(self): """Waitforqueues waits for a single queue.""" launch_thread(None, queue_function, True, self.queues, 0, 0.01) self.assertEqual(list(waitforqueues(self.queues))[0], self.queues[0]) def test_waitforqueues_multiple(self): """Waitforqueues waits for multiple queues.""" for index in range(3): launch_thread(None, queue_function, True, self.queues, index, 0.01) time.sleep(0.1) self.assertEqual(list(waitforqueues(self.queues)), self.queues) def test_waitforqueues_timeout(self): """Waitforqueues returns empty list if timeout.""" launch_thread(None, queue_function, True, self.queues, 0, 0.1) self.assertEqual(list(waitforqueues(self.queues, timeout=0.01)), []) def test_waitforqueues_restore(self): """Waitforqueues Queue object is restored to original one.""" expected = sorted(dir(self.queues[0])) launch_thread(None, queue_function, True, self.queues, 0, 0) waitforqueues(self.queues) self.assertEqual(sorted(dir(self.queues[0])), expected) pebble-4.5.3/test/test_process_pool_fork.py000066400000000000000000000466701365601427000211150ustar00rootroot00000000000000import os import sys import time import pickle import signal import unittest import threading import multiprocessing from concurrent.futures import CancelledError, TimeoutError import pebble from pebble import ProcessPool, ProcessExpired # set start method supported = False if sys.version_info.major > 2 and sys.version_info.minor > 3: methods = multiprocessing.get_all_start_methods() if 'fork' in methods: try: multiprocessing.set_start_method('fork') if multiprocessing.get_start_method() == 'fork': supported = True except RuntimeError: # child process pass else: supported = True initarg = 0 def initializer(value): global initarg initarg = value def long_initializer(): time.sleep(60) def broken_initializer(): raise Exception("BOOM!") def function(argument, keyword_argument=0): """A docstring.""" return argument + keyword_argument def initializer_function(): return initarg def error_function(): raise Exception("BOOM!") def pickle_error_function(): return threading.Lock() def long_function(value=1): time.sleep(value) return value def pid_function(): time.sleep(0.1) return os.getpid() def sigterm_function(): signal.signal(signal.SIGTERM, signal.SIG_IGN) time.sleep(10) def suicide_function(): os._exit(1) @unittest.skipIf(not supported, "Start method is not supported") class TestProcessPool(unittest.TestCase): def setUp(self): global initarg initarg = 0 self.event = threading.Event() self.event.clear() self.result = None self.exception = None def callback(self, future): try: self.result = future.result() except Exception as error: self.exception = error finally: self.event.set() def test_process_pool_single_future(self): """Process Pool Fork single future.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(function, args=[1], kwargs={'keyword_argument': 1}) self.assertEqual(future.result(), 2) def test_process_pool_multiple_futures(self): """Process Pool Fork multiple futures.""" futures = [] with ProcessPool(max_workers=1) as pool: for _ in range(5): futures.append(pool.schedule(function, args=[1])) self.assertEqual(sum([f.result() for f in futures]), 5) def test_process_pool_callback(self): """Process Pool Fork result is forwarded to the callback.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule( function, args=[1], kwargs={'keyword_argument': 1}) future.add_done_callback(self.callback) self.event.wait() self.assertEqual(self.result, 2) def test_process_pool_error(self): """Process Pool Fork errors are raised by future get.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(error_function) self.assertRaises(Exception, future.result) def test_process_pool_error_callback(self): """Process Pool Fork errors are forwarded to callback.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(error_function) future.add_done_callback(self.callback) self.event.wait() self.assertTrue(isinstance(self.exception, Exception)) def test_process_pool_pickling_error_task(self): """Process Pool Fork task pickling errors are raised by future.result.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(function, args=[threading.Lock()]) self.assertRaises((pickle.PicklingError, TypeError), future.result) def test_process_pool_pickling_error_result(self): """Process Pool Fork result pickling errors are raised by future.result.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(pickle_error_function) self.assertRaises((pickle.PicklingError, TypeError), future.result) def test_process_pool_timeout(self): """Process Pool Fork future raises TimeoutError if so.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(long_function, timeout=0.1) self.assertRaises(TimeoutError, future.result) def test_process_pool_timeout_callback(self): """Process Pool Fork TimeoutError is forwarded to callback.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(long_function, timeout=0.1) future.add_done_callback(self.callback) self.event.wait() self.assertTrue(isinstance(self.exception, TimeoutError)) def test_process_pool_cancel(self): """Process Pool Fork future raises CancelledError if so.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(long_function) time.sleep(0.1) # let the process pick up the task self.assertTrue(future.cancel()) self.assertRaises(CancelledError, future.result) def test_process_pool_cancel_callback(self): """Process Pool Fork CancelledError is forwarded to callback.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(long_function) future.add_done_callback(self.callback) time.sleep(0.1) # let the process pick up the task self.assertTrue(future.cancel()) self.event.wait() self.assertTrue(isinstance(self.exception, CancelledError)) def test_process_pool_different_process(self): """Process Pool Fork multiple futures are handled by different processes.""" futures = [] with ProcessPool(max_workers=2) as pool: for _ in range(0, 5): futures.append(pool.schedule(pid_function)) self.assertEqual(len(set([f.result() for f in futures])), 2) def test_process_pool_future_limit(self): """Process Pool Fork tasks limit is honored.""" futures = [] with ProcessPool(max_workers=1, max_tasks=2) as pool: for _ in range(0, 4): futures.append(pool.schedule(pid_function)) self.assertEqual(len(set([f.result() for f in futures])), 2) def test_process_pool_stop_timeout(self): """Process Pool Fork workers are stopped if future timeout.""" with ProcessPool(max_workers=1) as pool: future1 = pool.schedule(pid_function) pool.schedule(long_function, timeout=0.1) future2 = pool.schedule(pid_function) self.assertNotEqual(future1.result(), future2.result()) def test_process_pool_stop_cancel(self): """Process Pool Fork workers are stopped if future is cancelled.""" with ProcessPool(max_workers=1) as pool: future1 = pool.schedule(pid_function) cancel_future = pool.schedule(long_function) time.sleep(0.1) # let the process pick up the task cancel_future.cancel() future2 = pool.schedule(pid_function) self.assertNotEqual(future1.result(), future2.result()) def test_process_pool_initializer(self): """Process Pool Fork initializer is correctly run.""" with ProcessPool(initializer=initializer, initargs=[1]) as pool: future = pool.schedule(initializer_function) self.assertEqual(future.result(), 1) def test_process_pool_broken_initializer(self): """Process Pool Fork broken initializer is notified.""" with self.assertRaises(RuntimeError): with ProcessPool(initializer=broken_initializer) as pool: pool.active time.sleep(0.4) pool.schedule(function) def test_process_pool_running(self): """Process Pool Fork is active if a future is scheduled.""" with ProcessPool(max_workers=1) as pool: pool.schedule(function, args=[1]) self.assertTrue(pool.active) def test_process_pool_stopped(self): """Process Pool Fork is not active once stopped.""" with ProcessPool(max_workers=1) as pool: pool.schedule(function, args=[1]) self.assertFalse(pool.active) def test_process_pool_close_futures(self): """Process Pool Fork all futures are performed on close.""" futures = [] pool = ProcessPool(max_workers=1) for index in range(10): futures.append(pool.schedule(function, args=[index])) pool.close() pool.join() map(self.assertTrue, [f.done() for f in futures]) def test_process_pool_close_stopped(self): """Process Pool Fork is stopped after close.""" pool = ProcessPool(max_workers=1) pool.schedule(function, args=[1]) pool.close() pool.join() self.assertFalse(pool.active) def test_process_pool_stop_futures(self): """Process Pool Fork not all futures are performed on stop.""" futures = [] pool = ProcessPool(max_workers=1) for index in range(10): futures.append(pool.schedule(function, args=[index])) pool.stop() pool.join() self.assertTrue(len([f for f in futures if not f.done()]) > 0) def test_process_pool_stop_stopped(self): """Process Pool Fork is stopped after stop.""" pool = ProcessPool(max_workers=1) pool.schedule(function, args=[1]) pool.stop() pool.join() self.assertFalse(pool.active) def test_process_pool_stop_stopped_callback(self): """Process Pool Fork is stopped in callback.""" with ProcessPool(max_workers=1) as pool: def stop_pool_callback(_): pool.stop() future = pool.schedule(function, args=[1]) future.add_done_callback(stop_pool_callback) with self.assertRaises(RuntimeError): for index in range(10): time.sleep(0.1) pool.schedule(long_function, args=[index]) self.assertFalse(pool.active) def test_process_pool_large_data(self): """Process Pool Fork large data is sent on the channel.""" data = "a" * 1098 * 1024 * 50 # 50 Mb with ProcessPool(max_workers=1) as pool: future = pool.schedule( function, args=[data], kwargs={'keyword_argument': ''}) self.assertEqual(data, future.result()) def test_process_pool_stop_large_data(self): """Process Pool Fork is stopped if large data is sent on the channel.""" data = "a" * 1098 * 1024 * 50 # 50 Mb pool = ProcessPool(max_workers=1) pool.schedule(function, args=[data]) pool.stop() pool.join() self.assertFalse(pool.active) def test_process_pool_join_workers(self): """Process Pool Fork no worker is running after join.""" pool = ProcessPool(max_workers=4) pool.schedule(function, args=[1]) pool.stop() pool.join() self.assertEqual(len(pool._pool_manager.worker_manager.workers), 0) def test_process_pool_join_running(self): """Process Pool Fork RuntimeError is raised if active pool joined.""" with ProcessPool(max_workers=1) as pool: pool.schedule(function, args=[1]) self.assertRaises(RuntimeError, pool.join) def test_process_pool_join_futures_timeout(self): """Process Pool Fork TimeoutError is raised if join on long futures.""" pool = ProcessPool(max_workers=1) for _ in range(2): pool.schedule(long_function) pool.close() self.assertRaises(TimeoutError, pool.join, 0.4) pool.stop() pool.join() def test_process_pool_callback_error(self): """Process Pool Fork does not stop if error in callback.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(function, args=[1], kwargs={'keyword_argument': 1}) future.add_done_callback(self.callback) # sleep enough to ensure callback is run time.sleep(0.1) pool.schedule(function, args=[1], kwargs={'keyword_argument': 1}) def test_process_pool_exception_isolated(self): """Process Pool Fork an Exception does not affect other futures.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(error_function) try: future.result() except Exception: pass future = pool.schedule(function, args=[1], kwargs={'keyword_argument': 1}) self.assertEqual(future.result(), 2) @unittest.skipIf(os.name == 'nt', "Test won't run on Windows'.") def test_process_pool_ignoring_sigterm(self): """Process Pool Fork ignored SIGTERM signal are handled on Unix.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(sigterm_function, timeout=0.2) with self.assertRaises(TimeoutError): future.result() def test_process_pool_expired_worker(self): """Process Pool Fork unexpect death of worker raises ProcessExpired.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(suicide_function) self.assertRaises(ProcessExpired, future.result) def test_process_pool_map(self): """Process Pool Fork map simple.""" elements = [1, 2, 3] with ProcessPool(max_workers=1) as pool: future = pool.map(function, elements) generator = future.result() self.assertEqual(list(generator), elements) def test_process_pool_map_empty(self): """Process Pool Fork map no elements.""" elements = [] with ProcessPool(max_workers=1) as pool: future = pool.map(function, elements) generator = future.result() self.assertEqual(list(generator), elements) def test_process_pool_map_single(self): """Process Pool Fork map one element.""" elements = [0] with ProcessPool(max_workers=1) as pool: future = pool.map(function, elements) generator = future.result() self.assertEqual(list(generator), elements) def test_process_pool_map_multi(self): """Process Pool Fork map multiple iterables.""" expected = (2, 4) with ProcessPool(max_workers=1) as pool: future = pool.map(function, (1, 2, 3), (1, 2)) generator = future.result() self.assertEqual(tuple(generator), expected) def test_process_pool_map_one_chunk(self): """Process Pool Fork map chunksize 1.""" elements = [1, 2, 3] with ProcessPool(max_workers=1) as pool: future = pool.map(function, elements, chunksize=1) generator = future.result() self.assertEqual(list(generator), elements) def test_process_pool_map_zero_chunk(self): """Process Pool Fork map chunksize 0.""" with ProcessPool(max_workers=1) as pool: with self.assertRaises(ValueError): pool.map(function, [], chunksize=0) def test_process_pool_map_timeout(self): """Process Pool Fork map with timeout.""" raised = [] elements = [1, 2, 3] with ProcessPool(max_workers=1) as pool: future = pool.map(long_function, elements, timeout=0.1) generator = future.result() while True: try: next(generator) except TimeoutError as error: raised.append(error) except StopIteration: break self.assertTrue(all((isinstance(e, TimeoutError) for e in raised))) def test_process_pool_map_timeout_chunks(self): """Process Pool Fork map timeout is assigned per chunk.""" elements = [0.1]*10 with ProcessPool(max_workers=1) as pool: # it takes 0.5s to process a chunk future = pool.map( long_function, elements, chunksize=5, timeout=0.8) generator = future.result() self.assertEqual(list(generator), elements) def test_process_pool_map_error(self): """Process Pool Fork errors do not stop the iteration.""" raised = None elements = [1, 'a', 3] with ProcessPool(max_workers=1) as pool: future = pool.map(function, elements) generator = future.result() while True: try: result = next(generator) except TypeError as error: raised = error except StopIteration: break self.assertEqual(result, 3) self.assertTrue(isinstance(raised, TypeError)) def test_process_pool_map_cancel(self): """Process Pool Fork cancel iteration.""" with ProcessPool(max_workers=1) as pool: future = pool.map(long_function, range(5)) generator = future.result() self.assertEqual(next(generator), 0) future.cancel() for _ in range(4): with self.assertRaises(CancelledError): next(generator) # DEADLOCK TESTS def broken_worker_process_tasks(_, channel): """Process failing in receiving new tasks.""" with channel.mutex.reader: os._exit(1) def broken_worker_process_result(_, channel): """Process failing in delivering result.""" try: for _ in pebble.pool.process.worker_get_next_task(channel, 2): with channel.mutex.writer: os._exit(1) except OSError: os._exit(1) @unittest.skipIf(not supported, "Start method is not supported") class TestProcessPoolDeadlockOnNewFutures(unittest.TestCase): def setUp(self): self.worker_process = pebble.pool.process.worker_process pebble.pool.process.worker_process = broken_worker_process_tasks pebble.pool.channel.LOCK_TIMEOUT = 0.1 def tearDown(self): pebble.pool.process.worker_process = self.worker_process pebble.pool.channel.LOCK_TIMEOUT = 60 def test_pool_deadlock_stop(self): """Process Pool Fork reading deadlocks are stopping the Pool.""" with self.assertRaises(RuntimeError): pool = pebble.ProcessPool(max_workers=1) for _ in range(10): pool.schedule(function) time.sleep(0.1) @unittest.skipIf(not supported, "Start method is not supported") class TestProcessPoolDeadlockOnResult(unittest.TestCase): def setUp(self): self.worker_process = pebble.pool.process.worker_process pebble.pool.process.worker_process = broken_worker_process_result pebble.pool.channel.LOCK_TIMEOUT = 0.1 def tearDown(self): pebble.pool.process.worker_process = self.worker_process pebble.pool.channel.LOCK_TIMEOUT = 60 def test_pool_deadlock(self): """Process Pool Fork no deadlock if writing worker dies locking channel.""" with pebble.ProcessPool(max_workers=1) as pool: with self.assertRaises(pebble.ProcessExpired): pool.schedule(function).result() def test_pool_deadlock_stop(self): """Process Pool Fork writing deadlocks are stopping the Pool.""" with self.assertRaises(RuntimeError): pool = pebble.ProcessPool(max_workers=1) for _ in range(10): pool.schedule(function) time.sleep(0.1) pebble-4.5.3/test/test_process_pool_forkserver.py000066400000000000000000000427261365601427000223420ustar00rootroot00000000000000import os import sys import time import pickle import signal import unittest import threading import multiprocessing from concurrent.futures import CancelledError, TimeoutError import pebble from pebble import ProcessPool, ProcessExpired # set start method supported = False if sys.version_info.major > 2 and sys.version_info.minor > 3: methods = multiprocessing.get_all_start_methods() if 'forkserver' in methods: try: multiprocessing.set_start_method('forkserver') if multiprocessing.get_start_method() == 'forkserver': supported = True else: raise Exception(multiprocessing.get_start_method()) except RuntimeError: # child process pass initarg = 0 def initializer(value): global initarg initarg = value def long_initializer(): time.sleep(60) def broken_initializer(): raise Exception("BOOM!") def function(argument, keyword_argument=0): """A docstring.""" return argument + keyword_argument def initializer_function(): return initarg def error_function(): raise Exception("BOOM!") def pickle_error_function(): return threading.Lock() def long_function(value=1): time.sleep(value) return value def pid_function(): time.sleep(0.1) return os.getpid() def sigterm_function(): signal.signal(signal.SIGTERM, signal.SIG_IGN) time.sleep(10) def suicide_function(): os._exit(1) @unittest.skipIf(not supported, "Start method is not supported") class TestProcessPool(unittest.TestCase): def setUp(self): global initarg initarg = 0 self.event = threading.Event() self.event.clear() self.result = None self.exception = None def callback(self, future): try: self.result = future.result() except Exception as error: self.exception = error finally: self.event.set() def test_process_pool_single_future(self): """Process Pool Forkserver single future.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(function, args=[1], kwargs={'keyword_argument': 1}) self.assertEqual(future.result(), 2) def test_process_pool_multiple_futures(self): """Process Pool Forkserver multiple futures.""" futures = [] with ProcessPool(max_workers=1) as pool: for _ in range(5): futures.append(pool.schedule(function, args=[1])) self.assertEqual(sum([f.result() for f in futures]), 5) def test_process_pool_callback(self): """Process Pool Forkserver result is forwarded to the callback.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule( function, args=[1], kwargs={'keyword_argument': 1}) future.add_done_callback(self.callback) self.event.wait() self.assertEqual(self.result, 2) def test_process_pool_error(self): """Process Pool Forkserver errors are raised by future get.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(error_function) self.assertRaises(Exception, future.result) def test_process_pool_error_callback(self): """Process Pool Forkserver errors are forwarded to callback.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(error_function) future.add_done_callback(self.callback) self.event.wait() self.assertTrue(isinstance(self.exception, Exception)) def test_process_pool_pickling_error_task(self): """Process Pool Forkserver task pickling errors are raised by future.result.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(function, args=[threading.Lock()]) self.assertRaises((pickle.PicklingError, TypeError), future.result) def test_process_pool_pickling_error_result(self): """Process Pool Forkserver result pickling errors are raised by future.result.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(pickle_error_function) self.assertRaises((pickle.PicklingError, TypeError), future.result) def test_process_pool_timeout(self): """Process Pool Forkserver future raises TimeoutError if so.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(long_function, timeout=0.1) self.assertRaises(TimeoutError, future.result) def test_process_pool_timeout_callback(self): """Process Pool Forkserver TimeoutError is forwarded to callback.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(long_function, timeout=0.1) future.add_done_callback(self.callback) self.event.wait() self.assertTrue(isinstance(self.exception, TimeoutError)) def test_process_pool_cancel(self): """Process Pool Forkserver future raises CancelledError if so.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(long_function) time.sleep(0.1) # let the process pick up the task self.assertTrue(future.cancel()) self.assertRaises(CancelledError, future.result) def test_process_pool_cancel_callback(self): """Process Pool Forkserver CancelledError is forwarded to callback.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(long_function) future.add_done_callback(self.callback) time.sleep(0.1) # let the process pick up the task self.assertTrue(future.cancel()) self.event.wait() self.assertTrue(isinstance(self.exception, CancelledError)) def test_process_pool_different_process(self): """Process Pool Forkserver multiple futures are handled by different processes.""" futures = [] with ProcessPool(max_workers=2) as pool: for _ in range(0, 5): futures.append(pool.schedule(pid_function)) self.assertEqual(len(set([f.result() for f in futures])), 2) def test_process_pool_future_limit(self): """Process Pool Forkserver tasks limit is honored.""" futures = [] with ProcessPool(max_workers=1, max_tasks=2) as pool: for _ in range(0, 4): futures.append(pool.schedule(pid_function)) self.assertEqual(len(set([f.result() for f in futures])), 2) def test_process_pool_stop_timeout(self): """Process Pool Forkserver workers are stopped if future timeout.""" with ProcessPool(max_workers=1) as pool: future1 = pool.schedule(pid_function) pool.schedule(long_function, timeout=0.1) future2 = pool.schedule(pid_function) self.assertNotEqual(future1.result(), future2.result()) def test_process_pool_stop_cancel(self): """Process Pool Forkserver workers are stopped if future is cancelled.""" with ProcessPool(max_workers=1) as pool: future1 = pool.schedule(pid_function) cancel_future = pool.schedule(long_function) time.sleep(0.1) # let the process pick up the task cancel_future.cancel() future2 = pool.schedule(pid_function) self.assertNotEqual(future1.result(), future2.result()) def test_process_pool_initializer(self): """Process Pool Forkserver initializer is correctly run.""" with ProcessPool(initializer=initializer, initargs=[1]) as pool: future = pool.schedule(initializer_function) self.assertEqual(future.result(), 1) def test_process_pool_broken_initializer(self): """Process Pool Forkserver broken initializer is notified.""" with self.assertRaises(RuntimeError): with ProcessPool(initializer=broken_initializer) as pool: pool.active time.sleep(1) pool.schedule(function) def test_process_pool_running(self): """Process Pool Forkserver is active if a future is scheduled.""" with ProcessPool(max_workers=1) as pool: pool.schedule(function, args=[1]) self.assertTrue(pool.active) def test_process_pool_stopped(self): """Process Pool Forkserver is not active once stopped.""" with ProcessPool(max_workers=1) as pool: pool.schedule(function, args=[1]) self.assertFalse(pool.active) def test_process_pool_close_futures(self): """Process Pool Forkserver all futures are performed on close.""" futures = [] pool = ProcessPool(max_workers=1) for index in range(10): futures.append(pool.schedule(function, args=[index])) pool.close() pool.join() map(self.assertTrue, [f.done() for f in futures]) def test_process_pool_close_stopped(self): """Process Pool Forkserver is stopped after close.""" pool = ProcessPool(max_workers=1) pool.schedule(function, args=[1]) pool.close() pool.join() self.assertFalse(pool.active) def test_process_pool_stop_futures(self): """Process Pool Forkserver not all futures are performed on stop.""" futures = [] pool = ProcessPool(max_workers=1) for index in range(10): futures.append(pool.schedule(function, args=[index])) pool.stop() pool.join() self.assertTrue(len([f for f in futures if not f.done()]) > 0) def test_process_pool_stop_stopped(self): """Process Pool Forkserver is stopped after stop.""" pool = ProcessPool(max_workers=1) pool.schedule(function, args=[1]) pool.stop() pool.join() self.assertFalse(pool.active) def test_process_pool_stop_stopped_callback(self): """Process Pool Forkserver is stopped in callback.""" with ProcessPool(max_workers=1) as pool: def stop_pool_callback(_): pool.stop() future = pool.schedule(function, args=[1]) future.add_done_callback(stop_pool_callback) with self.assertRaises(RuntimeError): for index in range(10): time.sleep(0.1) pool.schedule(long_function, args=[index]) self.assertFalse(pool.active) def test_process_pool_large_data(self): """Process Pool Forkserver large data is sent on the channel.""" data = "a" * 1098 * 1024 * 50 # 50 Mb with ProcessPool(max_workers=1) as pool: future = pool.schedule( function, args=[data], kwargs={'keyword_argument': ''}) self.assertEqual(data, future.result()) def test_process_pool_stop_large_data(self): """Process Pool Forkserver is stopped if large data is sent on the channel.""" data = "a" * 1098 * 1024 * 50 # 50 Mb pool = ProcessPool(max_workers=1) pool.schedule(function, args=[data]) pool.stop() pool.join() self.assertFalse(pool.active) def test_process_pool_join_workers(self): """Process Pool Forkserver no worker is running after join.""" pool = ProcessPool(max_workers=4) pool.schedule(function, args=[1]) pool.stop() pool.join() self.assertEqual(len(pool._pool_manager.worker_manager.workers), 0) def test_process_pool_join_running(self): """Process Pool Forkserver RuntimeError is raised if active pool joined.""" with ProcessPool(max_workers=1) as pool: pool.schedule(function, args=[1]) self.assertRaises(RuntimeError, pool.join) def test_process_pool_join_futures_timeout(self): """Process Pool Forkserver TimeoutError is raised if join on long tasks.""" pool = ProcessPool(max_workers=1) for _ in range(2): pool.schedule(long_function) pool.close() self.assertRaises(TimeoutError, pool.join, 0.4) pool.stop() pool.join() def test_process_pool_callback_error(self): """Process Pool Forkserver does not stop if error in callback.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(function, args=[1], kwargs={'keyword_argument': 1}) future.add_done_callback(self.callback) # sleep enough to ensure callback is run time.sleep(0.1) pool.schedule(function, args=[1], kwargs={'keyword_argument': 1}) def test_process_pool_exception_isolated(self): """Process Pool Forkserver an Exception does not affect other futures.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(error_function) try: future.result() except Exception: pass future = pool.schedule(function, args=[1], kwargs={'keyword_argument': 1}) self.assertEqual(future.result(), 2) @unittest.skipIf(os.name == 'nt', "Test won't run on Windows'.") def test_process_pool_ignoring_sigterm(self): """Process Pool Forkserver ignored SIGTERM signal are handled on Unix.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(sigterm_function, timeout=0.2) with self.assertRaises(TimeoutError): future.result() def test_process_pool_expired_worker(self): """Process Pool Forkserver unexpect death of worker raises ProcessExpired.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(suicide_function) self.assertRaises(ProcessExpired, future.result) def test_process_pool_map(self): """Process Pool Forkserver map simple.""" elements = [1, 2, 3] with ProcessPool(max_workers=1) as pool: future = pool.map(function, elements) generator = future.result() self.assertEqual(list(generator), elements) def test_process_pool_map_empty(self): """Process Pool Forkserver map no elements.""" elements = [] with ProcessPool(max_workers=1) as pool: future = pool.map(function, elements) generator = future.result() self.assertEqual(list(generator), elements) def test_process_pool_map_single(self): """Process Pool Forkserver map one element.""" elements = [0] with ProcessPool(max_workers=1) as pool: future = pool.map(function, elements) generator = future.result() self.assertEqual(list(generator), elements) def test_process_pool_map_multi(self): """Process Pool Forkserver map multiple iterables.""" expected = (2, 4) with ProcessPool(max_workers=1) as pool: future = pool.map(function, (1, 2, 3), (1, 2)) generator = future.result() self.assertEqual(tuple(generator), expected) def test_process_pool_map_one_chunk(self): """Process Pool Forkserver map chunksize 1.""" elements = [1, 2, 3] with ProcessPool(max_workers=1) as pool: future = pool.map(function, elements, chunksize=1) generator = future.result() self.assertEqual(list(generator), elements) def test_process_pool_map_zero_chunk(self): """Process Pool Forkserver map chunksize 0.""" with ProcessPool(max_workers=1) as pool: with self.assertRaises(ValueError): pool.map(function, [], chunksize=0) def test_process_pool_map_timeout(self): """Process Pool Forkserver map with timeout.""" raised = [] elements = [1, 2, 3] with ProcessPool(max_workers=1) as pool: future = pool.map(long_function, elements, timeout=0.1) generator = future.result() while True: try: next(generator) except TimeoutError as error: raised.append(error) except StopIteration: break self.assertTrue(all((isinstance(e, TimeoutError) for e in raised))) def test_process_pool_map_timeout_chunks(self): """Process Pool Forkserver map timeout is assigned per chunk.""" elements = [0.1]*10 with ProcessPool(max_workers=1) as pool: # it takes 0.5s to process a chunk future = pool.map( long_function, elements, chunksize=5, timeout=0.8) generator = future.result() self.assertEqual(list(generator), elements) def test_process_pool_map_error(self): """Process Pool Forkserver errors do not stop the iteration.""" raised = None elements = [1, 'a', 3] with ProcessPool(max_workers=1) as pool: future = pool.map(function, elements) generator = future.result() while True: try: next(generator) except TypeError as error: raised = error except StopIteration: break self.assertTrue(isinstance(raised, TypeError)) def test_process_pool_map_cancel(self): """Process Pool Forkserver cancel iteration.""" with ProcessPool(max_workers=1) as pool: future = pool.map(long_function, range(5)) generator = future.result() self.assertEqual(next(generator), 0) future.cancel() for _ in range(4): with self.assertRaises(CancelledError): next(generator) pebble-4.5.3/test/test_process_pool_spawn.py000066400000000000000000000422161365601427000212740ustar00rootroot00000000000000import os import sys import time import pickle import signal import unittest import threading import multiprocessing from concurrent.futures import CancelledError, TimeoutError import pebble from pebble import ProcessPool, ProcessExpired # set start method supported = False if sys.version_info.major > 2 and sys.version_info.minor > 3: methods = multiprocessing.get_all_start_methods() if 'spawn' in methods: try: multiprocessing.set_start_method('spawn') if multiprocessing.get_start_method() == 'spawn': supported = True except RuntimeError: # child process pass initarg = 0 def initializer(value): global initarg initarg = value def long_initializer(): time.sleep(60) def broken_initializer(): raise Exception("BOOM!") def function(argument, keyword_argument=0): """A docstring.""" return argument + keyword_argument def initializer_function(): return initarg def error_function(): raise Exception("BOOM!") def pickle_error_function(): return threading.Lock() def long_function(value=1): time.sleep(value) return value def pid_function(): time.sleep(0.1) return os.getpid() def sigterm_function(): signal.signal(signal.SIGTERM, signal.SIG_IGN) time.sleep(10) def suicide_function(): os._exit(1) @unittest.skipIf(not supported, "Start method is not supported") class TestProcessPool(unittest.TestCase): def setUp(self): global initarg initarg = 0 self.event = threading.Event() self.event.clear() self.result = None self.exception = None def callback(self, future): try: self.result = future.result() except Exception as error: self.exception = error finally: self.event.set() def test_process_pool_single_future(self): """Process Pool Spawn single future.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(function, args=[1], kwargs={'keyword_argument': 1}) self.assertEqual(future.result(), 2) def test_process_pool_multiple_futures(self): """Process Pool Spawn multiple futures.""" futures = [] with ProcessPool(max_workers=1) as pool: for _ in range(5): futures.append(pool.schedule(function, args=[1])) self.assertEqual(sum([f.result() for f in futures]), 5) def test_process_pool_callback(self): """Process Pool Spawn result is forwarded to the callback.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule( function, args=[1], kwargs={'keyword_argument': 1}) future.add_done_callback(self.callback) self.event.wait() self.assertEqual(self.result, 2) def test_process_pool_error(self): """Process Pool Spawn errors are raised by future get.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(error_function) self.assertRaises(Exception, future.result) def test_process_pool_error_callback(self): """Process Pool Spawn errors are forwarded to callback.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(error_function) future.add_done_callback(self.callback) self.event.wait() self.assertTrue(isinstance(self.exception, Exception)) def test_process_pool_pickling_error_task(self): """Process Pool Spawn task pickling errors are raised by future.result.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(function, args=[threading.Lock()]) self.assertRaises((pickle.PicklingError, TypeError), future.result) def test_process_pool_pickling_error_result(self): """Process Pool Spawn result pickling errors are raised by future.result.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(pickle_error_function) self.assertRaises((pickle.PicklingError, TypeError), future.result) def test_process_pool_timeout(self): """Process Pool Spawn future raises TimeoutError if so.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(long_function, timeout=0.1) self.assertRaises(TimeoutError, future.result) def test_process_pool_timeout_callback(self): """Process Pool Spawn TimeoutError is forwarded to callback.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(long_function, timeout=0.1) future.add_done_callback(self.callback) self.event.wait() self.assertTrue(isinstance(self.exception, TimeoutError)) def test_process_pool_cancel(self): """Process Pool Spawn future raises CancelledError if so.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(long_function) time.sleep(0.1) # let the process pick up the task self.assertTrue(future.cancel()) self.assertRaises(CancelledError, future.result) def test_process_pool_cancel_callback(self): """Process Pool Spawn CancelledError is forwarded to callback.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(long_function) future.add_done_callback(self.callback) time.sleep(0.1) # let the process pick up the task self.assertTrue(future.cancel()) self.event.wait() self.assertTrue(isinstance(self.exception, CancelledError)) def test_process_pool_different_process(self): """Process Pool Spawn futures are handled by different processes.""" futures = [] with ProcessPool(max_workers=2) as pool: for _ in range(0, 5): futures.append(pool.schedule(pid_function)) self.assertEqual(len(set([f.result() for f in futures])), 2) def test_process_pool_future_limit(self): """Process Pool Spawn tasks limit is honored.""" futures = [] with ProcessPool(max_workers=1, max_tasks=2) as pool: for _ in range(0, 4): futures.append(pool.schedule(pid_function)) self.assertEqual(len(set([f.result() for f in futures])), 2) def test_process_pool_stop_timeout(self): """Process Pool Spawn workers are stopped if future timeout.""" with ProcessPool(max_workers=1) as pool: future1 = pool.schedule(pid_function) pool.schedule(long_function, timeout=0.1) future2 = pool.schedule(pid_function) self.assertNotEqual(future1.result(), future2.result()) def test_process_pool_stop_cancel(self): """Process Pool Spawn workers are stopped if future is cancelled.""" with ProcessPool(max_workers=1) as pool: future1 = pool.schedule(pid_function) cancel_future = pool.schedule(long_function) time.sleep(0.1) # let the process pick up the task cancel_future.cancel() future2 = pool.schedule(pid_function) self.assertNotEqual(future1.result(), future2.result()) def test_process_pool_initializer(self): """Process Pool Spawn initializer is correctly run.""" with ProcessPool(initializer=initializer, initargs=[1]) as pool: future = pool.schedule(initializer_function) self.assertEqual(future.result(), 1) def test_process_pool_broken_initializer(self): """Process Pool Spawn broken initializer is notified.""" with self.assertRaises(RuntimeError): with ProcessPool(initializer=broken_initializer) as pool: pool.active time.sleep(2) pool.schedule(function) def test_process_pool_running(self): """Process Pool Spawn is active if a future is scheduled.""" with ProcessPool(max_workers=1) as pool: pool.schedule(function, args=[1]) self.assertTrue(pool.active) def test_process_pool_stopped(self): """Process Pool Spawn is not active once stopped.""" with ProcessPool(max_workers=1) as pool: pool.schedule(function, args=[1]) self.assertFalse(pool.active) def test_process_pool_close_futures(self): """Process Pool Spawn all futures are performed on close.""" futures = [] pool = ProcessPool(max_workers=1) for index in range(10): futures.append(pool.schedule(function, args=[index])) pool.close() pool.join() map(self.assertTrue, [f.done() for f in futures]) def test_process_pool_close_stopped(self): """Process Pool Spawn is stopped after close.""" pool = ProcessPool(max_workers=1) pool.schedule(function, args=[1]) pool.close() pool.join() self.assertFalse(pool.active) def test_process_pool_stop_futures(self): """Process Pool Spawn not all futures are performed on stop.""" futures = [] pool = ProcessPool(max_workers=1) for index in range(10): futures.append(pool.schedule(function, args=[index])) pool.stop() pool.join() self.assertTrue(len([f for f in futures if not f.done()]) > 0) def test_process_pool_stop_stopped(self): """Process Pool Spawn is stopped after stop.""" pool = ProcessPool(max_workers=1) pool.schedule(function, args=[1]) pool.stop() pool.join() self.assertFalse(pool.active) def test_process_pool_stop_stopped_callback(self): """Process Pool Spawn is stopped in callback.""" with ProcessPool(max_workers=1) as pool: def stop_pool_callback(_): pool.stop() future = pool.schedule(function, args=[1]) future.add_done_callback(stop_pool_callback) with self.assertRaises(RuntimeError): for index in range(10): time.sleep(0.1) pool.schedule(long_function, args=[index]) self.assertFalse(pool.active) def test_process_pool_large_data(self): """Process Pool Spawn large data is sent on the channel.""" data = "a" * 1098 * 1024 * 50 # 50 Mb with ProcessPool(max_workers=1) as pool: future = pool.schedule( function, args=[data], kwargs={'keyword_argument': ''}) self.assertEqual(data, future.result()) def test_process_pool_stop_large_data(self): """Process Pool Spawn stopped if large data is sent on the channel.""" data = "a" * 1098 * 1024 * 50 # 50 Mb pool = ProcessPool(max_workers=1) pool.schedule(function, args=[data]) pool.stop() pool.join() self.assertFalse(pool.active) def test_process_pool_join_workers(self): """Process Pool Spawn no worker is running after join.""" pool = ProcessPool(max_workers=4) pool.schedule(function, args=[1]) pool.stop() pool.join() self.assertEqual(len(pool._pool_manager.worker_manager.workers), 0) def test_process_pool_join_running(self): """Process Pool Spawn RuntimeError is raised if active pool joined.""" with ProcessPool(max_workers=1) as pool: pool.schedule(function, args=[1]) self.assertRaises(RuntimeError, pool.join) def test_process_pool_join_futures_timeout(self): """Process Pool Spawn TimeoutError is raised if join on long tasks.""" pool = ProcessPool(max_workers=1) for _ in range(2): pool.schedule(long_function) pool.close() self.assertRaises(TimeoutError, pool.join, 0.4) pool.stop() pool.join() def test_process_pool_callback_error(self): """Process Pool Spawn does not stop if error in callback.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(function, args=[1], kwargs={'keyword_argument': 1}) future.add_done_callback(self.callback) # sleep enough to ensure callback is run time.sleep(0.1) pool.schedule(function, args=[1], kwargs={'keyword_argument': 1}) def test_process_pool_exception_isolated(self): """Process Pool Spawn an Exception does not affect other futures.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(error_function) try: future.result() except Exception: pass future = pool.schedule(function, args=[1], kwargs={'keyword_argument': 1}) self.assertEqual(future.result(), 2) @unittest.skipIf(os.name == 'nt', "Test won't run on Windows'.") def test_process_pool_ignoring_sigterm(self): """Process Pool Spawn ignored SIGTERM signal are handled on Unix.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(sigterm_function, timeout=0.2) with self.assertRaises(TimeoutError): future.result() def test_process_pool_expired_worker(self): """Process Pool Spawn unexpect death of worker raises ProcessExpired.""" with ProcessPool(max_workers=1) as pool: future = pool.schedule(suicide_function) self.assertRaises(ProcessExpired, future.result) def test_process_pool_map(self): """Process Pool Spawn map simple.""" elements = [1, 2, 3] with ProcessPool(max_workers=1) as pool: future = pool.map(function, elements) generator = future.result() self.assertEqual(list(generator), elements) def test_process_pool_map_empty(self): """Process Pool Spawn map no elements.""" elements = [] with ProcessPool(max_workers=1) as pool: future = pool.map(function, elements) generator = future.result() self.assertEqual(list(generator), elements) def test_process_pool_map_single(self): """Process Pool Spawn map one element.""" elements = [0] with ProcessPool(max_workers=1) as pool: future = pool.map(function, elements) generator = future.result() self.assertEqual(list(generator), elements) def test_process_pool_map_multi(self): """Process Pool Spawn map multiple iterables.""" expected = (2, 4) with ProcessPool(max_workers=1) as pool: future = pool.map(function, (1, 2, 3), (1, 2)) generator = future.result() self.assertEqual(tuple(generator), expected) def test_process_pool_map_one_chunk(self): """Process Pool Spawn map chunksize 1.""" elements = [1, 2, 3] with ProcessPool(max_workers=1) as pool: future = pool.map(function, elements, chunksize=1) generator = future.result() self.assertEqual(list(generator), elements) def test_process_pool_map_zero_chunk(self): """Process Pool Spawn map chunksize 0.""" with ProcessPool(max_workers=1) as pool: with self.assertRaises(ValueError): pool.map(function, [], chunksize=0) def test_process_pool_map_timeout(self): """Process Pool Spawn map with timeout.""" raised = [] elements = [1, 2, 3] with ProcessPool(max_workers=1) as pool: future = pool.map(long_function, elements, timeout=0.1) generator = future.result() while True: try: next(generator) except TimeoutError as error: raised.append(error) except StopIteration: break self.assertTrue(all((isinstance(e, TimeoutError) for e in raised))) def test_process_pool_map_timeout_chunks(self): """Process Pool Spawn map timeout is assigned per chunk.""" elements = [0.1]*10 with ProcessPool(max_workers=1) as pool: # it takes 0.5s to process a chunk future = pool.map( long_function, elements, chunksize=5, timeout=0.8) generator = future.result() self.assertEqual(list(generator), elements) def test_process_pool_map_error(self): """Process Pool Spawn errors do not stop the iteration.""" raised = None elements = [1, 'a', 3] with ProcessPool(max_workers=1) as pool: future = pool.map(function, elements) generator = future.result() while True: try: next(generator) except TypeError as error: raised = error except StopIteration: break self.assertTrue(isinstance(raised, TypeError)) def test_process_pool_map_cancel(self): """Process Pool Spawn cancel iteration.""" with ProcessPool(max_workers=1) as pool: future = pool.map(long_function, range(5)) generator = future.result() self.assertEqual(next(generator), 0) future.cancel() for _ in range(4): with self.assertRaises(CancelledError): next(generator) pebble-4.5.3/test/test_thread_pool.py000066400000000000000000000262531365601427000176600ustar00rootroot00000000000000import time import unittest import threading from pebble import ThreadPool from concurrent.futures import CancelledError, TimeoutError initarg = 0 def error_callback(future): raise Exception("BOOM!") def initializer(value): global initarg initarg = value def broken_initializer(): raise Exception("BOOM!") def function(argument, keyword_argument=0): """A docstring.""" return argument + keyword_argument def initializer_function(): return initarg def error_function(): raise Exception("BOOM!") def long_function(value=0): time.sleep(1) return value def tid_function(): time.sleep(0.1) return threading.current_thread() class TestThreadPool(unittest.TestCase): def setUp(self): global initarg initarg = 0 self.event = threading.Event() self.event.clear() self.results = None self.exception = None def callback(self, future): try: self.results = future.result() except Exception as error: self.exception = error finally: self.event.set() def test_thread_pool_single_future(self): """Thread Pool single future.""" with ThreadPool(max_workers=1) as pool: future = pool.schedule(function, args=[1], kwargs={'keyword_argument': 1}) self.assertEqual(future.result(), 2) def test_thread_pool_multiple_futures(self): """Thread Pool multiple futures.""" futures = [] with ThreadPool(max_workers=1) as pool: for _ in range(5): futures.append(pool.schedule(function, args=[1])) self.assertEqual(sum([t.result() for t in futures]), 5) def test_thread_pool_callback(self): """Thread Pool results are forwarded to the callback.""" with ThreadPool(max_workers=1) as pool: future = pool.schedule( function, args=[1], kwargs={'keyword_argument': 1}) future.add_done_callback(self.callback) self.event.wait() self.assertEqual(self.results, 2) def test_thread_pool_error(self): """Thread Pool errors are raised by future get.""" with ThreadPool(max_workers=1) as pool: future = pool.schedule(error_function) with self.assertRaises(Exception): future.result() def test_thread_pool_error_callback(self): """Thread Pool errors are forwarded to callback.""" with ThreadPool(max_workers=1) as pool: future = pool.schedule(error_function) future.add_done_callback(self.callback) self.event.wait() self.assertTrue(isinstance(self.exception, Exception)) def test_thread_pool_cancel_callback(self): """Thread Pool FutureCancelled is forwarded to callback.""" with ThreadPool(max_workers=1) as pool: pool.schedule(long_function) future = pool.schedule(long_function) future.add_done_callback(self.callback) future.cancel() self.event.wait() self.assertTrue(isinstance(self.exception, CancelledError)) def test_thread_pool_different_thread(self): """Thread Pool multiple futures are handled by different threades.""" futures = [] with ThreadPool(max_workers=2) as pool: for _ in range(0, 5): futures.append(pool.schedule(tid_function)) self.assertEqual(len(set([t.result() for t in futures])), 2) def test_thread_pool_tasks_limit(self): """Thread Pool future limit is honored.""" futures = [] with ThreadPool(max_workers=1, max_tasks=2) as pool: for _ in range(0, 4): futures.append(pool.schedule(tid_function)) self.assertEqual(len(set([t.result() for t in futures])), 2) def test_thread_pool_initializer(self): """Thread Pool initializer is correctly run.""" with ThreadPool(initializer=initializer, initargs=[1]) as pool: future = pool.schedule(initializer_function) self.assertEqual(future.result(), 1) def test_thread_pool_broken_initializer(self): """Thread Pool broken initializer is notified.""" with self.assertRaises(RuntimeError): with ThreadPool(initializer=broken_initializer) as pool: pool.active time.sleep(0.3) pool.schedule(function) def test_thread_pool_running(self): """Thread Pool is active if a future is scheduled.""" with ThreadPool(max_workers=1) as pool: pool.schedule(function, args=[1]) self.assertTrue(pool.active) def test_thread_pool_stopped(self): """Thread Pool is not active once stopped.""" with ThreadPool(max_workers=1) as pool: pool.schedule(function, args=[1]) self.assertFalse(pool.active) def test_thread_pool_close_futures(self): """Thread Pool all futures are performed on close.""" futures = [] pool = ThreadPool(max_workers=1) for index in range(10): futures.append(pool.schedule(function, args=[index])) pool.close() pool.join() map(self.assertTrue, [t.done() for t in futures]) def test_thread_pool_close_stopped(self): """Thread Pool is stopped after close.""" pool = ThreadPool(max_workers=1) pool.schedule(function, args=[1]) pool.close() pool.join() self.assertFalse(pool.active) def test_thread_pool_stop_futures(self): """Thread Pool not all futures are performed on stop.""" futures = [] pool = ThreadPool(max_workers=1) for index in range(10): futures.append(pool.schedule(long_function, args=[index])) pool.stop() pool.join() self.assertTrue(len([t for t in futures if not t.done()]) > 0) def test_thread_pool_stop_stopped(self): """Thread Pool is stopped after stop.""" pool = ThreadPool(max_workers=1) pool.schedule(function, args=[1]) pool.stop() pool.join() self.assertFalse(pool.active) def test_thread_pool_stop_stopped_function(self): """Thread Pool is stopped in function.""" with ThreadPool(max_workers=1) as pool: def function(): pool.stop() pool.schedule(function) self.assertFalse(pool.active) def test_thread_pool_stop_stopped_callback(self): """Thread Pool is stopped in callback.""" with ThreadPool(max_workers=1) as pool: def stop_pool_callback(_): pool.stop() future = pool.schedule(function, args=[1]) future.add_done_callback(stop_pool_callback) with self.assertRaises(RuntimeError): for index in range(10): time.sleep(0.1) pool.schedule(long_function, args=[index]) self.assertFalse(pool.active) def test_thread_pool_join_workers(self): """Thread Pool no worker is running after join.""" pool = ThreadPool(max_workers=4) pool.schedule(function, args=[1]) pool.stop() pool.join() self.assertEqual(len(pool._pool_manager.workers), 0) def test_thread_pool_join_running(self): """Thread Pool RuntimeError is raised if active pool joined.""" with ThreadPool(max_workers=1) as pool: pool.schedule(function, args=[1]) self.assertRaises(RuntimeError, pool.join) def test_thread_pool_join_futures_timeout(self): """Thread Pool TimeoutError is raised if join on long futures.""" pool = ThreadPool(max_workers=1) for _ in range(2): pool.schedule(long_function) pool.close() self.assertRaises(TimeoutError, pool.join, 0.4) pool.stop() pool.join() def test_thread_pool_exception_isolated(self): """Thread Pool an Exception does not affect other futures.""" with ThreadPool(max_workers=1) as pool: future = pool.schedule(error_function) try: future.result() except: pass future = pool.schedule(function, args=[1], kwargs={'keyword_argument': 1}) self.assertEqual(future.result(), 2) def test_thread_pool_map(self): """Thread Pool map simple.""" elements = [1, 2, 3] with ThreadPool(max_workers=1) as pool: future = pool.map(function, elements) generator = future.result() self.assertEqual(list(generator), elements) def test_thread_pool_map_empty(self): """Thread Pool map no elements.""" elements = [] with ThreadPool(max_workers=1) as pool: future = pool.map(function, elements) generator = future.result() self.assertEqual(list(generator), elements) def test_thread_pool_map_single(self): """Thread Pool map one element.""" elements = [0] with ThreadPool(max_workers=1) as pool: future = pool.map(function, elements) generator = future.result() self.assertEqual(list(generator), elements) def test_thread_pool_map_multi(self): """Thread Pool map multiple iterables.""" expected = (2, 4) with ThreadPool(max_workers=1) as pool: future = pool.map(function, (1, 2, 3), (1, 2)) generator = future.result() self.assertEqual(tuple(generator), expected) def test_thread_pool_map_one_chunk(self): """Thread Pool map chunksize 1.""" elements = [1, 2, 3] with ThreadPool(max_workers=1) as pool: future = pool.map(function, elements, chunksize=1) generator = future.result() self.assertEqual(list(generator), elements) def test_thread_pool_map_zero_chunk(self): """Thread Pool map chunksize 0.""" with ThreadPool(max_workers=1) as pool: with self.assertRaises(ValueError): pool.map(function, [], chunksize=0) def test_thread_pool_map_error(self): """Thread Pool errors do not stop the iteration.""" raised = None elements = [1, 'a', 3] with ThreadPool(max_workers=1) as pool: future = pool.map(function, elements) generator = future.result() while True: try: next(generator) except TypeError as error: raised = error except StopIteration: break self.assertTrue(isinstance(raised, TypeError)) def test_thread_pool_map_cancel(self): """Thread Pool cancel iteration.""" with ThreadPool(max_workers=1) as pool: future = pool.map(long_function, range(5)) generator = future.result() self.assertEqual(next(generator), 0) future.cancel() # either gets computed or it gets cancelled try: self.assertEqual(next(generator), 1) except CancelledError: pass for _ in range(3): with self.assertRaises(CancelledError): next(generator)