pax_global_header00006660000000000000000000000064146765762360014540gustar00rootroot0000000000000052 comment=33e3f91becf1004e2a5d698ef694dbc393f50e47 uqfoundation-pathos-33e3f91/000077500000000000000000000000001467657623600161075ustar00rootroot00000000000000uqfoundation-pathos-33e3f91/.codecov.yml000066400000000000000000000015001467657623600203260ustar00rootroot00000000000000comment: false coverage: status: project: default: # Commits pushed to master should not make the overall # project coverage decrease by more than 1%: target: auto threshold: 1% patch: default: # Be tolerant on slight code coverage diff on PRs to limit # noisy red coverage status on github PRs. # Note The coverage stats are still uploaded # to codecov so that PR reviewers can see uncovered lines # in the github diff if they install the codecov browser # extension: # https://github.com/codecov/browser-extension target: auto threshold: 1% fixes: # reduces pip-installed path to git root and # remove dist-name from setup-installed path - "*/site-packages/::" - "*/site-packages/pathos-*::" uqfoundation-pathos-33e3f91/.coveragerc000066400000000000000000000017271467657623600202370ustar00rootroot00000000000000[run] # source = pathos include = */pathos/* */pathos/helpers/* */pathos/secure/* */pathos/xmlrpc/* omit = */tests/* */info.py branch = true # timid = true # parallel = true # and need to 'combine' data files # concurrency = multiprocessing # thread # data_file = $TRAVIS_BUILD_DIR/.coverage # debug = trace [paths] source = pathos pathos/helpers pathos/secure pathos/xmlrpc */site-packages/pathos */site-packages/pathos/helpers */site-packages/pathos/secure */site-packages/pathos/xmlrpc */site-packages/pathos-*/pathos */site-packages/pathos-*/pathos/helpers */site-packages/pathos-*/pathos/secure */site-packages/pathos-*/pathos/xmlrpc [report] include = */pathos/* */pathos/helpers/* */pathos/secure/* */pathos/xmlrpc/* exclude_lines = pragma: no cover raise NotImplementedError if __name__ == .__main__.: 
# show_missing = true ignore_errors = true # pragma: no branch # noqa uqfoundation-pathos-33e3f91/.gitignore000066400000000000000000000000401467657623600200710ustar00rootroot00000000000000.tox/ .cache/ *.egg-info/ *.pyc uqfoundation-pathos-33e3f91/.readthedocs.yml000066400000000000000000000005151467657623600211760ustar00rootroot00000000000000# readthedocs configuration file # see https://docs.readthedocs.io/en/stable/config-file/v2.html version: 2 # configure sphinx: configuration: docs/source/conf.py # build build: os: ubuntu-22.04 tools: python: "3.10" # install python: install: - method: pip path: . - requirements: docs/requirements.txt uqfoundation-pathos-33e3f91/.travis.yml000066400000000000000000000036451467657623600202300ustar00rootroot00000000000000dist: jammy language: python matrix: include: - python: '3.8' env: - python: '3.9' env: - COVERAGE="true" - python: '3.10' env: - python: '3.11' env: - python: '3.12' env: - python: '3.13-dev' env: - DILL="master" - PPFT="master" - MULTIPROCESS="master" - python: 'pypy3.8-7.3.9' # at 7.3.11 env: - MULTIPROCESS="true" - python: 'pypy3.9-7.3.9' # at 7.3.16 env: - MULTIPROCESS="true" - python: 'pypy3.10-7.3.17' env: - MULTIPROCESS="true" allow_failures: - python: 'pypy3.10-7.3.17' # CI missing fast_finish: true cache: pip: true before_install: - set -e # fail on any error - if [[ $COVERAGE == "true" ]]; then pip install coverage; fi - if [[ $DILL == "master" ]]; then pip install "https://github.com/uqfoundation/dill/archive/master.tar.gz"; fi - if [[ $PPFT == "master" ]]; then pip install "https://github.com/uqfoundation/ppft/archive/master.tar.gz"; fi - if [[ $MULTIPROCESS == "master" ]]; then pip install "https://github.com/uqfoundation/multiprocess/archive/master.tar.gz"; fi - if [[ $MULTIPROCESS == "true" ]]; then pip install multiprocess --no-binary multiprocess; fi install: - python -m pip install . 
script: - for test in pathos/tests/__init__.py; do echo $test ; if [[ $COVERAGE == "true" ]]; then coverage run -a $test > /dev/null; else python $test > /dev/null; fi ; done - for test in pathos/tests/test_*.py; do echo $test ; if [[ $COVERAGE == "true" ]]; then coverage run -a $test > /dev/null; else python $test > /dev/null; fi ; done after_success: - if [[ $COVERAGE == "true" ]]; then bash <(curl -s https://codecov.io/bash); else echo ''; fi - if [[ $COVERAGE == "true" ]]; then coverage report; fi uqfoundation-pathos-33e3f91/DEV_NOTES000066400000000000000000000013551467657623600174240ustar00rootroot00000000000000NOTES: - Tunnel deletes the launcher's ssh pid, but not the tunnel's pid (typically ssh pid + 1) --> could use "getpid" to grab it... - using "known_hosts" file to get $PROFILE is a temporary solution. Should use old gsl.infect (now pox) code to explore & find out, instead of relying on a user-built 'database'. - RPyC supposedly works for the existing code... check it out. ========== install_package ======================== notes: - failure on "login" (won't source .cshrc) - partial success on "shb-b" (sources .bash_profile; pp ok; but can't "use") - partial success on "upgrayedd" [same on shc-c?] (sources .profile; pp ok; rpyc won't install from scratch) --------------------------------------------------- uqfoundation-pathos-33e3f91/LICENSE000066400000000000000000000033761467657623600171250ustar00rootroot00000000000000Copyright (c) 2004-2016 California Institute of Technology. Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. All rights reserved. This software is available subject to the conditions and terms laid out below. By downloading and using this software you are agreeing to the following conditions. 
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the names of the copyright holders nor the names of any of the contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
uqfoundation-pathos-33e3f91/MANIFEST.in000066400000000000000000000004611467657623600176460ustar00rootroot00000000000000include LICENSE include README* include MANIFEST.in include pyproject.toml include tox.ini include version.py recursive-include applications * recursive-include docs * recursive-include examples * recursive-include examples2 * recursive-include scripts * include .* prune .git prune .coverage prune .eggs uqfoundation-pathos-33e3f91/README.md000066400000000000000000000247301467657623600173740ustar00rootroot00000000000000pathos ====== parallel graph management and execution in heterogeneous computing About the Pathos Framework -------------------------- ``pathos`` is a framework for heterogeneous computing. It provides a consistent high-level interface for configuring and launching parallel computations across heterogeneous resources. ``pathos`` provides configurable launchers for parallel and distributed computing, where each launcher contains the syntactic logic to configure and launch jobs in an execution environment. Examples of launchers that plug into ``pathos`` are: a queue-less MPI-based launcher (in ``pyina``), a ssh-based launcher (in ``pathos``), and a multi-process launcher (in ``multiprocess``). ``pathos`` provides a consistent interface for parallel and/or distributed versions of ``map`` and ``apply`` for each launcher, thus lowering the barrier for users to extend their code to parallel and/or distributed resources. The guiding design principle behind ``pathos`` is that ``map`` and ``apply`` should be drop-in replacements in otherwise serial code, and thus switching to one or more of the ``pathos`` launchers is all that is needed to enable code to leverage the selected parallel or distributed computing resource. This not only greatly reduces the time to convert a code to parallel, but it also enables a single code-base to be maintained instead of requiring parallel, serial, and distributed versions of a code. 
``pathos`` maps can be nested, thus hierarchical heterogeneous computing is possible by merely selecting the desired hierarchy of ``map`` and ``pipe`` (``apply``) objects. The ``pathos`` framework is composed of several interoperating packages: * ``dill``: serialize all of Python * ``pox``: utilities for filesystem exploration and automated builds * ``klepto``: persistent caching to memory, disk, or database * ``multiprocess``: better multiprocessing and multithreading in Python * ``ppft``: distributed and parallel Python * ``pyina``: MPI parallel ``map`` and cluster scheduling * ``pathos``: graph management and execution in heterogeneous computing About Pathos ------------ The ``pathos`` package provides a few basic tools to make parallel and distributed computing more accessible to the end user. The goal of ``pathos`` is to enable the user to extend their own code to parallel and distributed computing with minimal refactoring. ``pathos`` provides methods for configuring, launching, monitoring, and controlling a service on a remote host. One of the most basic features of ``pathos`` is the ability to configure and launch a RPC-based service on a remote host. ``pathos`` seeds the remote host with the ``portpicker`` script, which allows the remote host to inform the localhost of a port that is available for communication. Beyond the ability to establish a RPC service, and then post requests, is the ability to launch code in parallel. Unlike parallel computing performed at the node level (typically with MPI), ``pathos`` enables the user to launch jobs in parallel across heterogeneous distributed resources. ``pathos`` provides distributed ``map`` and ``pipe`` algorithms, where a mix of local processors and distributed workers can be selected. ``pathos`` also provides a very basic automated load balancing service, as well as the ability for the user to directly select the resources. 
The high-level ``pool.map`` interface, yields a ``map`` implementation that hides the RPC internals from the user. With ``pool.map``, the user can launch their code in parallel, and as a distributed service, using standard Python and without writing a line of server or parallel batch code. RPC servers and communication in general is known to be insecure. However, instead of attempting to make the RPC communication itself secure, ``pathos`` provides the ability to automatically wrap any distributes service or communication in a ssh-tunnel. Ssh is a universally trusted method. Using ssh-tunnels, ``pathos`` has launched several distributed calculations on national lab clusters, and to date has performed test calculations that utilize node-to-node communication between several national lab clusters and a user's laptop. ``pathos`` allows the user to configure and launch at a very atomistic level, through raw access to ssh and scp. ``pathos`` is the core of a Python framework for heterogeneous computing. ``pathos`` is in active development, so any user feedback, bug reports, comments, or suggestions are highly appreciated. A list of issues is located at https://github.com/uqfoundation/pathos/issues, with a legacy list maintained at https://uqfoundation.github.io/project/pathos/query. Major Features -------------- ``pathos`` provides a configurable distributed parallel ``map`` interface to launching RPC service calls, with: * a ``map`` interface that meets and extends the Python ``map`` standard * the ability to submit service requests to a selection of servers * the ability to tunnel server communications with ssh The ``pathos`` core is built on low-level communication to remote hosts using ssh. 
The interface to ssh, scp, and ssh-tunneled connections can: * configure and launch remote processes with ssh * configure and copy file objects with scp * establish an tear-down a ssh-tunnel To get up and running quickly, ``pathos`` also provides infrastructure to: * easily establish a ssh-tunneled connection to a RPC server Current Release [![Downloads](https://static.pepy.tech/personalized-badge/pathos?period=total&units=international_system&left_color=grey&right_color=blue&left_text=pypi%20downloads)](https://pepy.tech/project/pathos) [![Conda Downloads](https://img.shields.io/conda/dn/conda-forge/pathos?color=blue&label=conda%20downloads)](https://anaconda.org/conda-forge/pathos) [![Stack Overflow](https://img.shields.io/badge/stackoverflow-get%20help-black.svg)](https://stackoverflow.com/questions/tagged/pathos) --------------- The latest released version of ``pathos`` is available from: https://pypi.org/project/pathos ``pathos`` is distributed under a 3-clause BSD license. Development Version [![Support](https://img.shields.io/badge/support-the%20UQ%20Foundation-purple.svg?style=flat&colorA=grey&colorB=purple)](http://www.uqfoundation.org/pages/donate.html) [![Documentation Status](https://readthedocs.org/projects/pathos/badge/?version=latest)](https://pathos.readthedocs.io/en/latest/?badge=latest) [![Build Status](https://travis-ci.com/uqfoundation/pathos.svg?label=build&logo=travis&branch=master)](https://travis-ci.com/github/uqfoundation/pathos) [![codecov](https://codecov.io/gh/uqfoundation/pathos/branch/master/graph/badge.svg)](https://codecov.io/gh/uqfoundation/pathos) ------------------- You can get the latest development version with all the shiny new features at: https://github.com/uqfoundation If you have a new contribution, please submit a pull request. 
Installation ------------ ``pathos`` can be installed with ``pip``:: $ pip install pathos Requirements ------------ ``pathos`` requires: * ``python`` (or ``pypy``), **>=3.8** * ``setuptools``, **>=42** * ``pox``, **>=0.3.5** * ``dill``, **>=0.3.9** * ``ppft``, **>=1.7.6.9** * ``multiprocess``, **>=0.70.17** More Information ---------------- Probably the best way to get started is to look at the documentation at http://pathos.rtfd.io. Also see ``pathos.tests`` and https://github.com/uqfoundation/pathos/tree/master/examples for a set of scripts that demonstrate the configuration and launching of communications with ssh and scp, and demonstrate the configuration and execution of jobs in a hierarchical parallel workflow. You can run the test suite with ``python -m pathos.tests``. Tunnels and other connections to remote servers can be established with the ``pathos_connect`` script (or with ``python -m pathos``). See ``pathos_connect --help`` for more information. ``pathos`` also provides a ``portpicker`` script to select an open port (also available with ``python -m pathos.portpicker``). The source code is generally well documented, so further questions may be resolved by inspecting the code itself. Please feel free to submit a ticket on github, or ask a question on stackoverflow (**@Mike McKerns**). If you would like to share how you use ``pathos`` in your work, please send an email (to **mmckerns at uqfoundation dot org**). 
Important classes and functions are found here: * ``pathos.abstract_launcher`` [the worker pool API definition] * ``pathos.pools`` [all of the pathos worker pools] * ``pathos.core`` [the high-level command interface] * ``pathos.hosts`` [the hostname registry interface] * ``pathos.serial.SerialPool`` [the serial Python worker pool] * ``pathos.parallel.ParallelPool`` [the parallelpython worker pool] * ``pathos.multiprocessing.ProcessPool`` [the multiprocessing worker pool] * ``pathos.threading.ThreadPool`` [the multithreading worker pool] * ``pathos.connection.Pipe`` [the launcher base class] * ``pathos.secure.Pipe`` [the secure launcher base class] * ``pathos.secure.Copier`` [the secure copier base class] * ``pathos.secure.Tunnel`` [the secure tunnel base class] * ``pathos.selector.Selector`` [the selector base class] * ``pathos.server.Server`` [the server base class] * ``pathos.profile`` [profiling in threads and processes] * ``pathos.maps`` [standalone map instances] ``pathos`` also provides two convenience scripts that are used to establish secure distributed connections. These scripts are installed to a directory on the user's ``$PATH``, and thus can be run from anywhere: * ``portpicker`` [get the portnumber of an open port] * ``pathos_connect`` [establish tunnel and/or RPC server] Typing ``--help`` as an argument to any of the above scripts will print out an instructive help message. Citation -------- If you use ``pathos`` to do research that leads to publication, we ask that you acknowledge use of ``pathos`` by citing the following in your publication:: M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. 
Aivazis, "Building a framework for predictive science", Proceedings of the 10th Python in Science Conference, 2011; http://arxiv.org/pdf/1202.1056 Michael McKerns and Michael Aivazis, "pathos: a framework for heterogeneous computing", 2010- ; https://uqfoundation.github.io/project/pathos Please see https://uqfoundation.github.io/project/pathos or http://arxiv.org/pdf/1202.1056 for further information. uqfoundation-pathos-33e3f91/applications/000077500000000000000000000000001467657623600205755ustar00rootroot00000000000000uqfoundation-pathos-33e3f91/applications/install-pp-1.6.4.2.sh000077500000000000000000000013011467657623600240160ustar00rootroot00000000000000# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE # NAME=pp VERSION=1.6.4 REVISION=2 IDENTIFIER=$NAME-$VERSION.$REVISION PREFIX=$HOME rm -fr $IDENTIFIER.zip wget http://dev.danse.us/packages/$IDENTIFIER.zip #wget http://www.parallelpython.com/downloads/$NAME/$IDENTIFIER.tar.gz rm -fr $IDENTIFIER tar zxvf $IDENTIFIER.zip cd $IDENTIFIER python setup.py build python setup.py install --prefix=$PREFIX cd .. uqfoundation-pathos-33e3f91/applications/install-rpyc-3.0.6.sh000077500000000000000000000014651467657623600242250ustar00rootroot00000000000000# Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE # NAME=rpyc VERSION=3.0 REVISION=6 IDENTIFIER=$NAME-$VERSION.$REVISION PREFIX=$HOME rm -fr $IDENTIFIER.tar.gz wget http://superb-east.dl.sourceforge.net/sourceforge/$NAME/$IDENTIFIER.tar.gz rm -fr $IDENTIFIER tar zxvf $IDENTIFIER.tar.gz cd $IDENTIFIER python setup.py build python setup.py install --prefix=$PREFIX cp -f $NAME/servers/classic_server.py $PREFIX/bin cp -f $NAME/servers/registry_server.py $PREFIX/bin cp -f $NAME/servers/vdbconf.py $PREFIX/bin cd .. uqfoundation-pathos-33e3f91/applications/install_pathos_server.py000066400000000000000000000115101467657623600255570ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ remote bash-script installation of selected package Usage: python install_pathos_server.py [package] [version] [hostname] [package] - name of the package to install [version] - version of the package to install [hostname] - name of the host on which to install the package """ from pathos.core import copy,execute from pathos.hosts import get_profile, register_profiles if __name__ == '__main__': ##### CONFIGURATION & INPUT ######################## # set the default remote host rhost = 'localhost' #rhost = 'foobar.danse.us' #rhost = 'computer.cacr.caltech.edu' # set any 'special' profiles (those which don't use default_profie) profiles = {'foobar.danse.us':'.profile', 'computer.cacr.caltech.edu':'.cshrc'} from time import sleep delay = 0.0 big_delay = 5.0 # set the default package & version package = 'pp' #XXX: package name MUST correspond to X in installer-X.sh version = '1.5.7' #XXX: also hardwired in installer-X.sh 
print("""Usage: python install_pathos_server.py [package] [version] [hostname] [package] - name of the package to install [version] - version of the package to install [hostname] - name of the host on which to install the package defaults are: "%s" "%s" "%s".""" % (package, version, rhost)) # get package to install from user import sys if '--help' in sys.argv: sys.exit(0) try: myinp = sys.argv[1],sys.argv[2] except: myinp = None if myinp: package,version = myinp #XXX: should test validity here... (filename) else: pass # use default del myinp # get remote hostname from user import sys try: myinp = sys.argv[3] except: myinp = None if myinp: rhost = myinp #XXX: should test rhost validity here... (how ?) else: pass # use default del myinp # get remote profile (this should go away soon) import sys try: myinp = sys.argv[4] except: myinp = None if myinp: rprof = myinp #XXX: should test validity here... (filename) profiles = {rhost:rprof} else: pass # use default del myinp # my remote environment (should be auto-detected) register_profiles(profiles) profile = get_profile(rhost) ##### CONFIGURATION & INPUT ######################## file = 'install-%s-%s.sh' % (package,version) # XXX: should use easy_install, if is installed... 
import tempfile # tempfile.tempdir = "~" #XXX: uncomment if cannot install to '/tmp' dest = tempfile.mktemp()+"_install" #XXX: checks local (not remote) # check for existing installation command = "source %s; python -c 'import %s'" % (profile,package) error = execute(command,rhost).response() if error in ['', None]: print('%s is already installed on %s' % (package,rhost)) # elif error[:39] == 'This system is available for legitimate use'[:39] \ # and rhost[:3] == 'shc-b.cacr.caltech.edu'[:3]: ## and error[-35:-1] == 'an authorized user of this system.'[-35:] \ # print('%s is already installed on %s' % (package,rhost)) #XXX: could parse 'error' for "ImportError" ==> not installed #XXX: could use command="python -c 'import X; X.__version__'" #XXX ...returns version# or "AttributeError" ==> non-standard version tag else: print(error) sleep(delay) # create install directory command = 'mkdir -p %s' % dest #FIXME: *nix only report = execute(command,rhost).response() #XXX: could check for clean install by parsing for "Error" (?) sleep(delay) # copy over the installer to remote host copy(file,rhost,dest) sleep(delay) # run the installer command = 'cd %s; ./%s' % (dest,file) #FIXME: *nix only report = execute(command,rhost).response() #XXX: could check for clean install by parsing for "Error" (?) 
sleep(big_delay) # remove remote install file # killme = dest+'/'+file #FIXME: *nix only # command = 'rm -f %s' % killme #FIXME: *nix only # execute(command,rhost).response() # remove remote package unpacking directory # killme = dest+'/'+package+'-'+version #FIXME: dies for NON-STANDARD naming # command = 'rm -rf %s' % killme #FIXME: *nix only # execute(command,rhost).response() # remove remote install directory killme = dest command = 'rm -rf %s' % killme #FIXME: *nix only execute(command,rhost).response() # check installation command = "source %s; python -c 'import %s'" % (profile,package) error = execute(command,rhost).response() if error in ['', None]: pass # is installed else: print(error) # raise ImportError("failure to install package") uqfoundation-pathos-33e3f91/docs/000077500000000000000000000000001467657623600170375ustar00rootroot00000000000000uqfoundation-pathos-33e3f91/docs/Makefile000066400000000000000000000012361467657623600205010ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXPROJ = pathos SOURCEDIR = source BUILDDIR = build # Internal variables ALLSPHINXOPTS = $(SPHINXOPTS) $(SOURCEDIR) # Put it first so that "make" without argument is like "make help". 
help: @echo "Please use \`make html' to generate standalone HTML files" .PHONY: help clean html Makefile clean: -rm -rf $(BUILDDIR) html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR) -rm -f $(BUILDDIR)/../../scripts/_*py -rm -f $(BUILDDIR)/../../scripts/_*pyc -rm -rf $(BUILDDIR)/../../scripts/__pycache__ uqfoundation-pathos-33e3f91/docs/requirements.txt000066400000000000000000000023441467657623600223260ustar00rootroot00000000000000# Packages required to build docs # dependencies pinned as: # https://github.com/readthedocs/readthedocs.org/blob/d3606da9907bb4cd933abcf71c7bab9eb20435cd/requirements/docs.txt alabaster==0.7.16 anyio==4.4.0 babel==2.15.0 certifi==2024.7.4 charset-normalizer==3.3.2 click==8.1.7 colorama==0.4.6 docutils==0.20.1 exceptiongroup==1.2.1 h11==0.14.0 idna==3.7 imagesize==1.4.1 jinja2==3.1.4 markdown-it-py==3.0.0 markupsafe==2.1.5 mdit-py-plugins==0.4.1 mdurl==0.1.2 myst-parser==3.0.1 packaging==24.0 pygments==2.18.0 pyyaml==6.0.1 readthedocs-sphinx-search==0.3.2 requests==2.32.3 six==1.16.0 sniffio==1.3.1 snowballstemmer==2.2.0 sphinx==7.3.7 sphinx-autobuild==2024.4.16 sphinx-copybutton==0.5.2 sphinx-design==0.6.0 sphinx-hoverxref==1.4.0 sphinx-intl==2.2.0 sphinx-multiproject==1.0.0rc1 sphinx-notfound-page==1.0.2 sphinx-prompt==1.8.0 sphinx-rtd-theme==2.0.0rc2 sphinx-tabs==3.4.5 sphinxcontrib-applehelp==1.0.8 sphinxcontrib-devhelp==1.0.6 sphinxcontrib-htmlhelp==2.0.5 sphinxcontrib-httpdomain==1.8.1 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.7 sphinxcontrib-serializinghtml==1.1.10 starlette==0.37.2 tomli==2.0.1 typing-extensions==4.12.1 urllib3==2.2.2 uvicorn==0.30.0 watchfiles==0.22.0 websockets==12.0 
uqfoundation-pathos-33e3f91/docs/source/000077500000000000000000000000001467657623600203375ustar00rootroot00000000000000uqfoundation-pathos-33e3f91/docs/source/_static/000077500000000000000000000000001467657623600217655ustar00rootroot00000000000000uqfoundation-pathos-33e3f91/docs/source/_static/css/000077500000000000000000000000001467657623600225555ustar00rootroot00000000000000uqfoundation-pathos-33e3f91/docs/source/_static/css/custom.css000066400000000000000000000001251467657623600245770ustar00rootroot00000000000000div.sphinxsidebar { height: 100%; /* 100vh */ overflow: auto; /* overflow-y */ } uqfoundation-pathos-33e3f91/docs/source/conf.py000066400000000000000000000177401467657623600216470ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # pathos documentation build configuration file, created by # sphinx-quickstart on Tue Aug 8 06:50:58 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os from datetime import datetime import sys scripts = os.path.abspath('../../scripts') sys.path.insert(0, scripts) try: os.symlink(scripts+os.sep+'portpicker', scripts+os.sep+'_portpicker.py') os.symlink(scripts+os.sep+'pathos_connect', scripts+os.sep+'_pathos_connect.py') except: pass # Import the project import pathos # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. 
They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.imgmath', 'sphinx.ext.ifconfig', 'sphinx.ext.napoleon'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = 'pathos' year = datetime.now().year copyright = '%d, The Uncertainty Quantification Foundation' % year author = 'Mike McKerns' # extension config github_project_url = "https://github.com/uqfoundation/pathos" autoclass_content = 'both' autodoc_default_options = { 'members': True, 'undoc-members': True, 'private-members': True, 'special-members': True, 'show-inheritance': True, 'imported-members': True, 'exclude-members': ( '__dict__,' '__slots__,' '__weakref__,' '__module__,' '_abc_impl,' '__init__,' '__annotations__,' '__dataclass_fields__,' ) } autodoc_typehints = 'description' autodoc_typehints_format = 'short' napoleon_include_private_with_doc = False napoleon_include_special_with_doc = True napoleon_use_ivar = True napoleon_use_param = True # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = pathos.__version__ # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. 
language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False # Configure how the modules, functions, etc names look add_module_names = False modindex_common_prefix = ['pathos.']#,'pathos.helpers.','pathos.secure.','pathos.xmlrpc.'] # -- Options for HTML output ---------------------------------------------- # on_rtd is whether we are on readthedocs.io on_rtd = os.environ.get('READTHEDOCS', None) == 'True' # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # if not on_rtd: html_theme = 'alabaster' #'bizstyle' html_css_files = ['css/custom.css',] #import sphinx_rtd_theme #html_theme = 'sphinx_rtd_theme' #html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] else: html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = { 'github_user': 'uqfoundation', 'github_repo': 'pathos', 'github_button': False, 'github_banner': True, 'travis_button': True, 'codecov_button': True, 'donate_url': 'http://uqfoundation.org/pages/donate.html', 'gratipay_user': False, # username 'extra_nav_links': {'Module Index': 'py-modindex.html'}, # 'show_related': True, # 'globaltoc_collapse': True, 'globaltoc_maxdepth': 4, 'show_powered_by': False } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars if on_rtd: toc_style = 'localtoc.html', # display the toctree else: toc_style = 'globaltoc.html', # collapse the toctree html_sidebars = { '**': [ 'about.html', 'donate.html', 'searchbox.html', # 'navigation.html', toc_style, # defined above 'relations.html', # needs 'show_related':True option to display ] } # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'pathosdoc' # Logo for sidebar html_logo = 'pathos.png' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'pathos.tex', 'pathos Documentation', 'Mike McKerns', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'pathos', 'pathos Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. 
List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'pathos', 'pathos Documentation', author, 'pathos', 'Parallel graph management and execution in heterogeneous computing.', 'Miscellaneous'), ] # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'https://docs.python.org/3/': None} # {'python': {'https://docs.python.org/': None}, # 'mystic': {'https://mystic.readthedocs.io/en/latest/', None}, # 'pyina': {'https://pyina.readthedocs.io/en/latest/', None}, # 'pox': {'https://pox.readthedocs.io/en/latest/', None}, # 'dill': {'https://dill.readthedocs.io/en/latest/', None}, # 'multiprocess': {'https://multiprocess.readthedocs.io/en/latest/', None}, # 'ppft': {'https://ppft.readthedocs.io/en/latest/', None}, # 'klepto': {'https://klepto.readthedocs.io/en/latest/', None}, # } uqfoundation-pathos-33e3f91/docs/source/helpers.rst000066400000000000000000000004221467657623600225310ustar00rootroot00000000000000pathos.helpers module documentation =================================== mp_helper module ---------------- .. automodule:: pathos.helpers.mp_helper .. :exclude-members: + pp_helper module ---------------- .. automodule:: pathos.helpers.pp_helper .. :exclude-members: + uqfoundation-pathos-33e3f91/docs/source/index.rst000066400000000000000000000004701467657623600222010ustar00rootroot00000000000000.. pathos documentation master file pathos package documentation ============================ .. toctree:: :hidden: :maxdepth: 2 self pathos scripts .. automodule:: pathos .. :exclude-members: + Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` uqfoundation-pathos-33e3f91/docs/source/pathos.png000066400000000000000000002314661467657623600223570ustar00rootroot00000000000000PNG  IHDRI("WCiCCPICC ProfilexXy8?L-0kf0'EٗDEDDHRHJ$zs>9>ss&A&?J6#>eii KYy `Ԯ)79rLGm8B@2>T)KX@ ޻vq;ཀྵC= @~==~N#AY_B鲮Au\*i@6\PIK? 
8h[+wo ys064#ۋPlilnoomooD|p`Ȏ, }(9Pa.x<I#5(gbflU 49-$N? /꠿ؖD$T lU%r# j3BU-SlzeƬ&*!+zkc6wlGԏtvjqreuSte=f>\2*Dm?cs:<8H,"6<"?xJ%6j3vgbXb |b|l%vq WSSCӊ{fgfd^IͿ~/>`K⍭t%enIޖzW\G+ԫT+=~$Y'X~_OX +_mz\ђ,ܵM][gcW nް/* S_x}P[?]V!gDnc}tcX8ç ']jui*o=ӄLwͳvS?b5,a+W=~ot6ކ]|zۨ .%u+Ef [+g .(_B`DDl+KKݓޒ5;"7hܪ눔zF 斖Abp0֨ɈOs. iK=+'kʱK6ٶ۷;:>irCm{,G/0Xo}PycB T܏=)R q7CR2(YbrJ &Jf*jG0,_G uNӗ52DM4YZXYsa3dhW`pM']]/<4q\lOcmA >̈́ēD-p!185;r!X-GHqSKXs8%B,]d)쩹tmgRϺHl=wxQ=;|2%h1cEimbΖ5{uO@)p P@@4 vLبxp3_X {&# (`a0_%MX3#l.gEVpFG# H7d@#JHJӁBT-jV6vN. 0}}# ;C5#=;#4aaJcZg1hcfbcgKbcf_ q|&s/e;Wo?!%+Xz!eL>'aVEDD$ J z"Y"U(}_YܢLģWED4Ǖ5:b3Gi]ж}H?!Dkbaeiheep gCK/uxr3[{ {{{7A'| ROF0GFv1vLy FRg`=[_TyQ}]ʂj:|Z&;5zg+^]v8)Wo3Ms= 2W|dMo5`H 0 eî`e8/{F ыF"Ld (M$M:* DKGkE{3ygMOLLre,,y{X D˱?PЙRKUirPcUК"Ȉ"h3!z: M8hQ#/Bo30 0j3VLL9,,ͬlمٟrrlqr@k;Gg:?Z=={L18jTSWP=!Ґvɖ})VQ U@_uAM@P#[sYO|i96ٌ`*{XS[WW/WaDcWfm_;^< ,{f[FF6u[B$3K2S}yz 'B<3@1 a <@ ^@ x &A^ ؑ#e&-)pb^Ȏ/t1]q#{GvJן֎G?&P뿙]K'$Ѡz{p"$~)ΜH $ 0Hn$?B* UZHMԧ3S;w,cM8@Έ/}عC P-VbB P 9qݶaJh:`]NBSx]ƊπΚegt ^qYV" YbVTTl%ϲb=k`T"AryDl]nkok}!۳wu (% ***K 4$QV (C`۫Z`ih.{姴T@am>(-I@dqqMdg vq>S' >OdZY~`($q>&! CC0R7g{0_UH)('XIt y2ǒ2a!z:CͿ"m=)~h3};پ;oևZV>we ZsE~uz`"t$!b~^NֽiΜeEjz/}ť$=JPQcI3-TR$voG>wqkgv)^_( Gee;lw(D,).ur@R#1dٮ],jN>$@OAb IG _:\F &UU%e**Wm+<ě/Rd*+,IpќܖΆvu. αɯK+k AḀd(T^ٮQ"6ƒIV|U,_b__oy%CF{ 1q:52 hGKq9@͒4`z D4t)fpaƢ@RؑOpjZ: IvX!vɔMmIx7iuNUpT"VB^<7_ \3V,[n]JTCl$btڥKPRf:|~Q嫡΋7֯V:U_gկ|}%$MHE}JWb;8dڶkChx_oUͷ>݊WcEN4V2 2qD Gji$UVԛzFTJmzAġH NDŽ >> ]s`Rzd$emf :{(I2psI2$y1Q` rʖ)? 
pH.&#s̱E>z(zT+V~O%S E`e?|p6O6GNWmחc=1.ʧڜO|.|^' zcs\@fZ+T<581*dtR+ m߿ȦmV2V_Ҫy^ "xo/y1S6.#.m}گvM.LQj@]u9L"Qo驆 nw-tlCHE$Nܸ$ĚHÔ"iLmIJM"IP(IN3{Qb`Wc@2_L'AG%q^7&MlR++eT0%wJ58؃<_ֳ]߂߯,)γɦ^0%tNJ$7~m{A~Eыkl7fVqjttϊ4z4@>w|M[F'O6JT:i9hJŠt^G|"EH@5;$P_ZÈ4t< qKf# !j@KFީ Jwj+/PЅ(@i-6ٗ8/+p `BF#*&Eb'FzuW5ՁQa\-69ȌjD'Q `coFT4u`]_R IE;/^>.<H%( .Ê6M6os !cFXAKXaG^p4(29I;YQS+) Ss3yR9.E*3]oV%t"Iz@4D sUo͐Yc7m0ɜBw on}uNO6 ?/)^ilû>-̈%WGN=(3n &Ou$J_hS D3}1ɈZZ 8*&rڋWXKt@ƚ*S$NkUk8.嗆Ȁ c!‛ M&*P=$Ϩ+YpIn6ǟ"4,Pɫ<%j:L!BeR "OD m;^ߡHHn #e؛V5kMĉUV\ч hFb%?)/B,dV91_SLl>?wBgX'eji&GZ-5RB Hbs[]a-?RVnθ㿬x^#\Ɍ{p N&4ΖVN1[6oE䃴 HrQ5ht4@ƝlxO#Euܩm~mnmH0.#F0Lt3NP$0)MX% Cπ(!T^; PX ED"}S*# [?)Z`]ulY6Iӈ*4P6ҝǩ5:d ]pHyBÁȥ/}L BaoF3^HA}3)-A/uc 5HCMx}Ix,[ a >D@c#_/~~/Yb.1;HA)\M\@})M'udFMq:8p#KMة%OE:p  )3nh`&pcbvi&I.-q&0{Tm$Jm3''~ ^`/6s DCep(:ꖁXfRB1(k>d[ui^$T8pT=R_$pLػ68ºZ"VsMmOOp'j a=kCmni/~nb5:\`dEQfai=nko]lvij>htHPc>Ƣ=z8|/?h?$ȉ.0ū(.r =P1GHq Z3ءLw6o\>mMTɀQw'=&re\3umů.5u+,Uϻ\ U$La:nL@B3>t5_<c?*^] c.66\omR͛N N ]H7JLAv/5z}NH7*̣U' pt<OBTT)D AŒ%I XPi>q)fӆy2(Y z{n6}L3Z` y{=dwmtT&;Vly_ف_Ob&$Ɗϲ߿]U hY$Kb|O:& mWT5"ĉ)UmcڧBCv}B'􇣩0Hnme >Z`2H;vtɱYFYHEJ{?L-sd_3f:-A+FYt ad;v*\Q0"p^Nf|ZZt!p +ܮҳ9#S`CEh4ŸLFdbO0n߱v#nTp!6|C|b8<>8*\zPyyK_tɀOH? )DtVy='CѮ={_Ϸ)ZXQ?dL %^2&$zIwuVau4 }Aݱm%iE2F},GvW%i)ZFHCsiF.1.զ.U=ˬEFwivlH6U2g2J16h|%d -vp>Z8qD]dwPS{:۬UڑzkHwڷfSxhi#ؼzwbTe+:3̓y$[W|N(S= s [?𯾔mz+ʨѠ|HL(4L%6Q#Rv!iq:tkvɀLp.éC1ugz$)d+>Ξ))1NVpDDh肃 9{h?kG*#}94XQ6^V&0>e;Amٶ/lP57)^i{1 @Nv%^J!`_^t׳~5FBc"Şc^gQ9U:z"*lN??Ts*OF 4V Oڭ73.ihPA6/sӸCRFM.߹c7BVhbGҌwGrM5"5h0f℉N6|@0`mYHj2Ϥ_{{%-CLg:?:GsCnde 8lߊ. 
U&"qAhPAl& W_}]~+/~ͫoUUX'j¢Av뉜d?,?eZ LTf{ֵXG| -d!mcT6i ]7p P F[/ Z?U' m*Ӂ/'lӚ_6 <){PpCX.S~:v3q%,_q@x%i[8UHx%ݯu}2 C{6PްYrZ KkI~'3@@-d@@З6bϓᛃ?4WʀVƪ"@ƘQP4lμPg0TXbPo[͋_gE53Nzڛ 緿[nz]@]JF) )^i0ajԊbrg8_DtZ!tk5O`X=JGHr0U6óe]F:,cqu$\RCe!J_@H,tR4ͧCeyfga{寴e6glIe6$}RBd-vD+R7lѣy`i,܈F"8*֦RR jR9Tv3W2u(V\3E:U IDATUg[Aʇ>C?`{ ]3- .q]\S~[EwEIa7}{GmiIc S2av?䙼s xmF~T;tw`$qLj5F&MҞUDՒAl 6hw̌ 88͒ټM@٬@iV5I._ KyYTOW*OBB@Igf R"4n:EϋSQ}ֹF]ЪԨC0Mm\%UR>,tWqCXU@IJ5H@(|;:ԚU*wBRg(՜"^S֎Eo>2D#?T2w:,SXm-kKi0wT:С^$\-G\-mݲ֬]#Fd%#A$˗ 5gvf.QGW|w^vh$y<`f7$zwHL?Pq}xsIzxp֊ʌ>gQVaL:4AӧLHg(Lˣ/;/)nL efΜiO;,ڏnV\iv\ʖTו~]z-=y-[nW>.̀ L|hyH2U bŻaZ[l+='^_ъo@sߏe?|MX3Tdwtxz,*V~VM"wXFNN.j[p܈@AFI-Pmxs; SIE WtC ̃Qˎ [%*Fʪg^"b2  F>XV#׬1#[+[mڄv^]mD9͆l;5N <nzUP3gzÖq|>_h۷mtlA<_ҶKlض6Jn@nmyc tK;$0T6ve3fҀLOA Lw$ā wv؟x/O7,ZϝVzd :wz-D<\iP%M3g؜stI/xr\r_AAU]8P`lcׯ~JmܲYwZ:zҮOtx-+|LV\iz9cs.,=-8(y#!}$I:9qC)N<+PH$ǀ $+8h t(Sa xyioeXrt?#5%z~c؆G=0TX< ௕Y 7f}pTbi($QNzB0z'IXcW<7o?GWj>c 20T?ye sE5$QTe$\,u2dUetlEqpަmz͛6B4:TLfZv̪8 UI^I~O~mh5Z ηB{R#1Κ!ZG h~#M68B4xWrG+x d -wiFKJ]!d-nͶ 8Efz:S*Zus܄qkR|SDsRW@J]8 Fޜm:2Ө8Uga~:M:y5ѥz/Zb(AjW@y-R^]43Kdy_O6G W>[ο?7Mcv+4NL4yLw< S9V/CWK !o :HJd5{=OY 8#Of'5_P[K{@9V^`_5xH6]>~% 5?[%Բ/!R/Vn|8z`!~ze4%hP*ɘ&t:Y̻SR MA Bm2AdHg9쎀'I.UkT%pf _Ial.f:0H< u ^J65* .yHa?nhAߏ>\=@ą4'~rްM Wfs~Zj@IZg[#,{T55N|Pٜ:udفYl\XbA]f0 턲Ń .]̻w#)17s0He:Fz|PoevQT4wѳNR8ɀM˚5@nj;l;6(}Z/K ֮@ iHObϻ;Y3%~ߜ 'YybFAB~m{H DKFq>@d$rP^D]N\D쎯 4+tgj.}*Cc"p #nЫ |t޻&ȡQ:a % Z<;HRk9\1a(F5Ja|83WEBozF!p8ԽEvHԝ֪'ߩILׁy ˮd4?*IYKZPWo6wc?`"c%>@xi:yqtMRձYPBߡEA6^^/⻑^x Ri7(O"(/ bk2mUk5I~D?Wy[luPwS%c:eKH ;}:_o@)*.% $Hg/ux4] 4 }c/SK+z ҬZ%9?‹l%R/g1S08EQkAM ='Fr_)!K] Pzpx–㠱f\"UO:S@1L= p LJ=t3{n|+; 8U^H瀩 (wsu `1QuEtv$oKh/{W ~N!E4}b>F%m}__W~(i,^Pғ$ P"K3}0qå%Mt礨<ô0> K})ħr`yI#ti)Heۚ]=B-4?_H6mJ5]d }VO]v|PjgYh?}=c 3u1l\M`]{g.ٯ3 F朘DGR(X4B?䤔oDo "2 Q!-v>gwka$EkN+G`P960R!{rIǥH"O9M($aŶN^쿹f3ʠ[nQ-WxI ޒX(OWnl\״t @/PbN鹰Z5g)N+_4NI>nk >w9J ,t3 zu=Ț/OhP84xs W%OQ(P#x}V VٔWmoz.8KB E0sMtF$(G_ͽ{Rh=ˤ.a+?˴~ꗾjwFZz2S=՛P8#(<@( L#=::kʨlThA vh]AA?9q:,Ɏ&;} ?+: OrI09|iCd)#>/{(ט5u@Η:6/hܨ/|?"]([!~@ 6hFz[y=9 
֦4y `-'%ghMeAdxA$o|0 a @'x/~/ !E<ՒfHz)) : ;8Yň"r¼0HH/ z4P @KV <. jL|B<F= I [&}l[|)\aiN?L;Yѩ{#6/mspL.v tU*S݋B44Dc]yB0YRxe^B[ϓΟk -+PQ`ZB@8\]&K4ol{ֶ`(B;qfv7k5*?6`RH1(k4%8ũȌ0R I E  @{I}6ﴋ_ y#PBS@Bq @H# " \ vO';]Njd< )KLf?ۖ8ퟛJTZn^ pOS>[(= 7P z5A/h tWpBW#Qx&q>7Q9Qhf IDATH'߉k s‡O$yA6sG{&>uˌnvmחnz[J CKTCHA})J+-[3t2L|!cDH ;m|ں7j3=NU۟dlt[ |`VbkW9[z[DW"PHw/}(BS(jOz[j4?a>;a @prQLHaB<;i!wOd1i鴸9HRbt! J;h P`k QԶb⅋. 鲑Cڍ=:wn?@ =4=T'NiFptKM~"o'bqw?C'7Wt1.+aჴ]  zӍ$~D*Ųv5?|jIv\S  9Vul/EOYuŶ~[w)|t*&m~-]UJEߕ8M׽޷רd{mN0sp!G00bFH9 ;  G&jEQFIWV17ѓS2TKgA-7{ 6M?k Z|23R AԦ8f~ܨO@ug!N5غ"A ]d29@JgYsϴ o՗^aN=%@D󯟷m\,ǽ{eU^Cs09 2v{Ga4HWK",<U_ER!ON]ZW[-WZW79\qm>`4U5e/@\=<~"M=g|AA#hK;dUژ>-?}wtp~hgVo`7,]ciSfLk F^7j" ,3nV9`g0gˉt:umD`LI'.}R ɪYz۶^{k^k_lk}s|~y6s#Vqj9QIxГ(0|GKm_;P01H[H, cBA B[C:lG$>몗JL0`l>J*-#1A[R,UE`Y)._}Ze@Au{Es3~NB>ɗ.ЋqeAZ" қ0u)%A6?H5k%Cӈ4^YKL? ۶Tugh#,zQjܖ঩o֫܃m?ܘ|QQ1kJW(Ufo@QX>f*ajt<huϻ^_3/uZ*6gnɒ$+00_]̙rF<—FyThDPt_| 6)^5@v'zGΟ1I rb)Oci$Y$AVpU Ѻz6:]Tpu W |wꪛQn ]W[[߰O\kE3%E,'tftkeF a `s}ӰaQ}r\>7hhWE'>ܥC:h' Jz&q|jV4GS 'ghua#%yL#qEW3ۦ^<mX–S?lt(fG{tس%0]lLp5 Ґ vTmMP? 'ilƎyqrvlÚ>9LN<#f.]ZuPja,~i@R\wOu 摤}((KZm[<(9:z}0!?t#a)$d[Nr^_zH h92$ݟG,㪘es²'%ӵT}nwtΑb>U ڠgq㕩2Q Ơ(O(eE_lP)ly0H΃4١CtqꅎДp: EE9X5ІY'„#x KH35~ FHlČ5;;](9BO;p;@zz.BmGx)h~_:gw~ͪ.^lc:wǰ8$p5G#T=b(Jl aD*/T<hA& MC"Q*Jg-n.ŨR]n DFZm   O&Qbl-;n-U_QuGc.vhG`EMQOH+i(N%ٜu'כ54ѬUB\DPH@UGU QAFf{6ɮS13(>x|O#5L$`tr$Wu&.5/+龧qB#J6q_+)42Q8X1؈*Zo J=O3Z:r DUfh)iPgժilK-Pry#(&!iJ7U8:ߥJ:,ؠnF:u#Р ^ vg%ׯ ..SGlǙH؍#Prہ_}OtDtJcI@2Q\׉FGaV3ÜcQ8ŵMiUyڊϓV0dIy,s%cOy94:]pƕ̺vnҡM#U8"ٌFJܧ[+ 6>oI#GuKgx}!tMN%m ,#}pXa{6jg{pw Bo/o@M}Pu<>||wp*㡞2jRqԵ r w!?ܝ<2r6HI&eܗ{鑺/](@*ő4GsyfAp*!C@ t~-6IK"} eұv)aηNi EJB8+O2ogs?x_f WC 3`%kG/A%hN'Ji"pFk: > zS8$vttt Bs@lko ҈VDT&q9zхݧ=֭ŋ<+pQ}P91byhu)&HsuWNh 1?Pg^IMm$ӎBR?NW'((e* V+-Om+f(fۿ}jʦ^vM|Cr$ l$@Ndc"IVGDiBlO~bOr3Sa[]颾W`A J{J7I 0Bdo Ns+u # Q)``ʹH'}[X5."VۨHы@*S=5P|^\y_PI+L5XOeuP;ѮRc#BNuXHf;_F]:.N5o{6$@9ĄA!8Hg@n߳z5Fs٢xKTVtWvϛ2SghJ 5Y up`ڹ|DpQVh4,TkrG$HlG "pn? 
$*<}aֵ+o/+O^=!)4ű>g!q𴷿*/̚W*!x$`8GyPM`[e='TWpKfR&U 1zL\ Qj2~GYyݺuc-Z?-.a:+Zҍ\gE5aJUX X4u7ýCrGꅏr-nݺU_DqݴqNqRuy ͛6Xwv&)VB:bz7ZvxM}ӹs45w5); cM/"GT`gID]Ds˱t831HkbLn^t|_ I;Ux/4Lzb}Ǭ[\q!ÑKJ@NĔ ׽u b"Os)^.$>4bL8o?V$.a 0\x2S'(#mOUH*rȿ*{ o^n ;l6!rBwk@]b2t֭[>2?cm9;HdB.^C zzen2֍֩S: 9\J#u: :w&nrаmrx945.S:я'OY3+eLuƨ}ȼSFk]>m1t9A2-vl2TT~ԥrV)hb6?{,kCΕ恽:q瀨Jyx4@PP-4ng$hAG$-D TH88.5}+4fPԁ&@Zʗ۰ ZʅHOwg*@TR.Hރ-P9ʼnc8WWFyF;̳2tXՄ hL#ƫOo}ᇭJȳacAhP[_o.ڼeҚ&a ճx=&C%0BO$h5$MpHzd@l8Td^tX-_RcFu- Ifp\N@Ј$rlyT4= QÄHEr' ^L@T=;:uk$Lޑ̡e~YN !,@ٙeھ)peT HسBc|0NUIoR͞6Ac9iLVgeMsTtZW:@:$ ͇ ԰Imf۫k)_x'wujPiبNUʵC<3ub1sM|sΣrfN /'I [N<[/8ᬐ"8Rꓱcn suh|"2X)@$BMUpDb6'bxh4b ?/!i#aF2Dq1Iʞ@{Ψf.n+g3BǦA텲7WIJ8wU6Pӧ8$;M|tP^:ذztY~ tMHPOu ;wxD )RdF`]Tn1઴cGF1cgn+eI/-ieVT*:G`UJ3Pfv X_B-Wڨ'jт>͚t?xUNk*_Ӂf; ՚ev:,ή=fq XEڸzu֑`ӡARDfi&8J[!iKІi q T]met3 F@(wf`@26&W?iWeYQ0ZURQ0"q! `)ЀKXGA6)(ay%6OSb66X] 82yRWݯ:Ou f34_Nh f͞e[l f؀G򜉟Uևͱ͵uvFaէE" |eu$?m~kTF*j[3Jm`bÀ&S5󣥐s882q1۞]ivEv^a/[z%$_zU5<w[a $ڱSftZ- Iy|Z1<Tݚg@: htvY>gC80 $#`KP e_!BOΠ{$I/ٳg۶m[>74nIa7mm.=x)F2 ۲~@W0ɱSv.Pi핉DHEoCӰ.sI$-2 JN ^C/]jSj4KJxāNB'ɾlϣ8+}q5=đ9l˼-7kwR]S*J.9'.h[ڣtQ~M5O*JsbJ9 %9JB9Ȧ#1 +ei'yz4O>%.VlR71˘a]ĩ"01=E{􎟈]R/b;Np$ӎP,g7 22#YNjRI1Qtr)P4+J:IDE3jD+LP|m$P~<}TGh Z (swN<$߃~)9M<%,K4۶yBo0/z .s_<콌tz^t+lC^=[rt`uA z8$I:r1to"|-ޫQ{79X"=>U4>m͜9N? :}c؉NXdG"ⵯ+W`I`ι{|j7v"8Ryt ^tI,=|}J%4~H.јGɑ 8L(!Ѓ{]%(N#YI0#AۮALYd5Ȅ QAp$|!9SU|gO…3%˲R?x<,@ z޼btd 'ǎ :}v}_Uox&:30<)*Ɏsz(tL ob|<&ynLIs4Xsye2N)7zy$z^}啲I۫W=Iqh$.eڬ ?*_.EA0%>vJ~X앥-!QԡDdezO4 8!ٚQ}aVMfȷ^J{w4g~Js@QΤ./%bЛ{߄ȟ{ӎ$zmuѱTiI+NO~N_y9{^ 珪^o*wv $ͦK t 2&n5$ RҎF)F]: CEGbZChH@! G #>Qp5dp÷o!@ BRNJ=2W\hjwJ} z<޵flɤjONpbVz\-C0!?Fx D_!M1DfDzIܳ~$qa5έ^00} #y8 qFs zZ!;ƀTJI8G^eݱYku_J8F)>;SX"ۢ킋."ڜ*w ѝ ^ʲCHfےel=vZOsc;[Ѡnzopw{$DҨ&iyğ`taLLFH x9DO40Z|VOI>h_öS9b; - '_dݰv4`~P< (=!q/8R1zt`q iIG\+҂V%I%m ?.a8FVˈ$Ej䨖3f،Y<8:h$A줺dزܫ,Z8vuv/N|SܔߍV)mEZUIH ͞s6Ht{Ϟ=uo̬YC=COڑ!)ܼ[NS^$+< Wᆰrْ~X'2RcKm3 "p!!^R0 gvl%8}nv&c0"V*m5Zb! 
Ip%ZfO`8eEu#4Abڧ,u}Mm>]Vsj3H \8'&~W,#1pCusYE]qU>hɒeޞ8DF,e1, śA+/~7Q+И iI2%H/\x$c.a5w /TKw <#9>32c<Gތ} D+ =)I#UVj Xb<#[q('ŭ,LnZm՜'$E ꄷݟZy4qDl?X:1+iJs|9/PIucz#xoU_q8V4giwW/]IӠCȕtl/ThrHx_š0YG+7 1`,7͟#*Mwܡ"wH)5ҏ̝aAT&fw_qrWkm@i AO^/2G[ÒŋEyc˘:#G|<E{ر 2k^r,B#oXt.@)Ճ",#GCUF*C׬|w]>iq6JR[g]?5C[V$07p]סhϹ Ww1OR'O>D9{#|"aw%.̟R0٧\3;^-l>8,T}R/xD 8DbS:ixxw,HZNT"3DF4Q(z5a݆ QG'@ e(=F&p'Z(2y/̉+Fqx&qJ"@ n79` $ ,$l!8eY30fLT7J)~1i:M/{iXk+堻kw\J")|4c%ŋ^~ы$΋GGFQW:-$J><IW:!);4y)$_Ño$G:tLRgq:)rwFC]s󭷪ԟ܋WYdbir.(lظ!Y&d|F^WEI3@8ÜbFg=[ڊ_7I88]'p)$HKW!ǜCp9"Q_*8`wG"6PS"0&7H1_f8Kׯ7y٢ B>+i94w]S]t?~cO9pEUk΃0 |bDa,ba%?%WRٵ[hP~q?99b\`)6fČ<5H@>ůz*+$/_R& ^.D腡;޲R&i d(X=pwzJ(@xG4GHwdqH5uFU<ҠJ`w0p zau Fd'Ц>,UNI~c$eRý/ G}PTǽ}/$pUCfUڸ-u8OhF:86ִǙfǙ=Q#X3#ynq,O03ɴX;gIT׼޶E z a(ÈEI21 _\ $< _7>D"wģw.?=˜y3 I͠%RJ(cEj1'`ɔfkZ,rJʌ {HNHԐ\uGd[a5ڻ+H̨@ RʙUK x{K3ewG+gol25-ښخ}q1B<=CocgjI~hY`y"R|ox3,m=?(ЇlA')"ӦM_rIغu-Qtcè0 1-DO\PKqޏ9a_,ч!+$過'.~%^wS孒02 H" ͜lL䉹 $,务0Y;Q_6.j1Tt_b<={ Bc>5ZܘM JT(DwX+L/Y9A~I:T&>=ۭ̔?0i0ed[=zy~g0ɕ-Hn+8D_իpktH(8HVsL8yKD0FT d ^E@(.w1,UQ XdIΙC^Lh.LK8ՠ,ϝLUH^SY̚YiP%'iS_(wIRL܎2K;,@Padzﰒ&z.w;80 Qas $ب]55>?\un7a)Vp iv0?c~G?UVlb)iAHq42h z$bCsCvlkȨ 1 Hdz֢yb7L,ڥB}4.uetɒiP9V2ԥ-v({w 'Lޏg?Aa!! 3I6')Fk1#RsoVgy0;wNx>zc^KDdf5 AZE>p8xW]m;eW^ sB!ͼOңET@dc3[?Q:#}oɅx`J aa>M!Tk#z὚ +6Fow(ke1IZ`l:?E@YmEru㩋./:mH$&M a06u%L: A.wp7ì8j쬗$x> ̓? C80 E.*@^/2U3LO,CY<đ't~H- oi)s8ḧP=]zbEʚ>$jC$PmGTq ~̵3LSYI_ʍ;shKS6nfhY4 )XzamwMGbVm5d&4\^ nQ*%r%PdB\Y3&pܩ6Śv00%uS]XVv:d㒜ܪߧt!ü' om+BR#xأiڤq &8ڈh,A2Ql HťY?{y=k h /o}փ\0^KXts<@Z?`,3Rc>WW j(NA?EV"cta+Ze2lnp|d*APj%2Arܩp25V#mLj$o%]ZX` 8pMacPDfsG|<"씽ϭ2 DuXᨛhCvNaLDYv?3&tq;]P-hlͱ yxmw$a+d: :c{-w 5 1!ha?!}2A [M}j7hD#Ed)RV!*,P&Xw7"xUù:x'i 3\ ,3 Z3fR){HpgUxsDRp)߆;,Zt؉(.%vK3FW@~y:#:G:q.hFC0x J2J „ IDAT )_;H{pȾ4 Cg,I:m~ FH6 u"}PپBIP2 D\e :zAl6!G I/k%ך<>Oh,K0s\FxPQj+" [ I9 ;;QZ(1*+4UEg٘o>rD {Y"q\H;vlq(W\9_Voא|vQ!Ez1 pJbyޱD.HGc@, L6ئ=.le}Πpm䨻ڛ}kgШ?? 
/Zb@gdH] ʫs%}@0Q$xGLF7 K wޡ3`<[ygoT@M@*I'$oPs`B1 s11_؎:_9+ lSf QRlj ‹ݙC_AX =5\0R9,+;{,DUDxN]щDpYirxC10-@Mu[Ct(FXa#zD9aQfzC6,_ՖOp4 gց^t=>iC\>y޶}%IND6uFde}K[KS`gqMϷpW_v I*x-ᜳSd&eƊ @2خ)O>Zq8e;z#|SLG+\Bxh(4G֣a1g(G >\ùcBem!+Ia? `XtYظy1#Ifƺc:0>o/аPW-X_QPaJ wz:\C8:MEBA^@"̵ШrHuvR]XO;E CevFd_y[tW;]vqQ Ef i,A"&q0Cj"!d3"C] 8Dۿ_͟tKpIE!fxcy-Ñ$Cvޘ ?4H} 4 Qf 9+[%bE"PnEg3:uJ&hGWt|eJA2Sj\&(gh~5 U  )%tFDlbb$56p9 Z:W[NjڟVZ4Wv&;$]M25\wn҂>`mK5mʜ^=/YDGη.%{9C!uڞsq2iOw4$2=p0)􅂸%Hi?duZKڳ'צw W:+z/?t[@ ^Qkd:0S7K{vхZ͎'v7d!jbqF@te 4HjD}1LhR%: KUJwjns _ Zh)rEa`ĴJ|,,`fxg InĶe%fi$4da&63Wt Maًꨛ1G{kނaCپ_9DM&JUz*L7NqaŊᥗ_n.m;YFrx̢, mhl-Z(}iޜwa)Rl@l&itjFׄ1C pSG>0Z1O EC[aTRck#t[A:$-F1'ifw$L` w R! Z/xSaᒄa)RfX O@=4M^$ߍ4S\pQ@:!FԿkяjgz`X׶ƊUJ7Վ B -AH>RaHWfRiȃ4jƎ*_ 9[[ojS7i!{QfckԹhWK$H?ǟ۪clG Of2)#YӯD|G@ a=#)yzEiGbP  Dz3uJpΧ9)y"YBO-˯Fb>E?t&s@te+Id$Β¿+ 3ݹח- / ~Z9pmeX.vY`t0tu|&-0/8 Heڬ=sXU_x`1@4tew&:n$_h  X!@D&/Z0k@G'P)޴ichyeυTE)0PEr< z8bE;$d~&W8rZB@jXzp$[yȋ R/ʾ*;tQ\K8F)U3 7 NQӕFӕILǪ7"~ 8jTFLPgwsf&1a~-?)|XVPfl SIa%,DV:'q'=U1=AJ+O޲$H_@rN V*$qYʁ4/wGNS)@kwpUfFy4hu*CXQnW`.Nut`y@-\tf_5d;dAy%pEq8  0Պ+kaogJ;XA^ /}07>dW24͌IZᥜI5#Xbĸ%2@ |$Cq<<.wHH82 Kp f ɥɡ!sz?8i If ٳU螫U]#A"]5Qvm۵X2dixvGU(ϕu:x$ﭖ*)ti+yo@“O=B҆ m掼}hϠeA2I\Y-bCؖ +R TH ﳥh~Ż ӧG&58H@|-NցDGԚ4F_MGsc:DoAfB] mZ-e?OӓO V(A2?)Рx+:]@ Lm21I\js׮YC4Cnj HojSEqsX>">.NxF]{CZo1,  A;ky8gi9 /o|tthI3@Hɗz޹ɽ#DSξ. Ks<0R+-V$R߮Ϧwyk"=}ŒY/V)s\.DXYN@#O]G)Dg!J1v?#2 /l jdv]rKD|>ܬFL-(w v*4[/epJ6R|{_6|oXwkT4[[lǟqXxqS~s78E=\}d0]e%I *FcoETP,5dAʾ'GI$'~Pι$,_FcL%q#~"cNTgb=GM7Nk&ሑ}Rb _TCPf( +K. 
ښ*OZdF$ӾSlۺ5lCGNPZ64eZ:qᐤ}с!Eת~lAGRm7^13TlI&}} dwJȴ$L5:3NG& GVGU\ )#p&/iwYduvn0Qk 1R8g*X^1V0+C$ ݣkb@F=a-b||cŒ I+@2GDhšW=zv( Qqea]'%Ow=RapB{嗭` H<ʶQh0s{@wݥLᩎ}ա]!:XJ9M'-!bzHH.-FVߪX{7D,fQ.Q FA~'s6:;~I50\x .4-P'DDL$u}8^DTGK<߲IL:@0=(*!/w>\ib,'I=`yi{bd$̕+%$F\!u=w2vCٖ WN~| J9wF9#*P/lg۷wOh\kjKGg7jY ܳυ.\2\ygw¢EΝ;ıw타2Z# 3tJiB)<g7]U;̔H[7kBsNdϜ93'+͟Qaޒ`*FfzL"\̓bB_y8&pz-H+tZD* LmrO(b˳@ +f ["*7P4: RtnOFq Y"_޵j~CI:m2zѠ-{$-(qaqO%vkzI;|Qv$O=~Zi׾W_O?C=TʞzNWD!pdahBIwu7"T{{qO7tI=QJpF780:RH~])?L4E9_X,̉*U I"/X+߾5!}mb K',$A8OR"l=*|O{ /_T28+,q$3L4BA3 IDATΧ05Zw/%\r<ڽWxiq/L7spט1 @9iqd2x0L56na]Ev(zR[ciau@m-_+s^jr0 (|w^Kr<:šaYº;&8L("F5|iH". -* z ɼd`T9S:7s(Mѕ>IB7L<8#!։{M7C]ѹ++!dHcQ'IJIiu 0Zt gfa0DšP!4sLMr4z3V;ח, eeKo~]u }P Kd]/6op;Z`>*˝ڍ*w=iJ$ςHG:uF's`??̞3'KN֠_edi,UV:8ZaB%LW )&@ٳgG "1cN!6./24ĿCĤ35i?n_&Gm<أW dݒ<%-WjDXS;Tdtx.\fV&H ^9FZ[&.e|Rnm@_4 Z^e 4d)@2s5Z62Q`E2&2*a ˡ1) IH,aA0r4*Α(cC8"g,H:?t5566GX:ê5jVs4䝗x0D'5۵}[x񇽢và]:{ƮDI uɓåx|^ (IHmܴJ۶P}Ns|?UMX)(( oS}Q͞5;\-\`boy)$a(+.]mF}kRdцf_oY%|}jқ Y:&$(_3PCC0}V9%vT%bc(qW2Ѐ/tK5 < MHpir =!Q2j eg̚5v jL5ŹڢasO?!ZyFtQ=%pLYqqiJ$ML/ۏn:eqv$[!٧Wtի%aN]QoH;ZG9WB=31E9ړpjd (_oT[*T}3TQ M}{eHyԀ.Z:,[\ <7IZH0(Ȝ$՟47 ;up2eT\ztߪuhԼpdP$yۦ>Y pM(RD =aJ$<0GmI3τ)OHzוW/z/yƏ+pxtl<en6`b WUwiD>UpY. 
,R'h%Zj_ox^1F_4J>)wZ-ƀ=D]s] _B:_dE2=g/szǥ{=0veeϒn>~!Iq0!_l("9j8CHqp@úG?qlAVօ}4lܸ1;Be2z9[帞G9 ʲAWb1Ϧʾߒ~4g㏇ǟxBYZ>õ]tq]NaC$7mem:H7,Q(.J>v,zyR$sS' uuŅEV6^}G#Mh6n;Y'zU0JF5$$M(NJ#$/{t7V=i[tivcx\|.& @Dąމ"L}Eo nIqamڲ61NrND\=jH`jh;)e9sUaXfB]6nX6H`b-Q[#N`HxNhs{&&ؑ,^W\k葇Ȃw8u7hu+Ë jp@Ṗj:@q6m@qst-;O'iNEK.8ϩ k.?`ٲ2KwGm_R?gyVJ;LCظ₾+552\Y$ d"Zڢ|Dxi-},5)[6o <iOT> /{YG4K/3Lׯb=֓Hf4Iz nUFޣEweM QkȄ1b b  Yp`Q<[X ~~|d,Sg!Z?>򈆫˯2+Mk/2fxNNP~f4͔9ׇy01yJɽ;gpD׮Yl@7tz#H[:~ue Tyw Uñ4Ø_$zZ 0`8^t΍7,`8~!-\14RCbՆK٬=' b1QUG6z#WIig :e ]4$zM*:?px駬"sLiFeO=WPj=y`䙟=op$w]qU]Q}?ha]%ɒR[R5c:Jy:$O"%\>z.`k:H顃#5 RI[䢺c!7ChU X|[ ]#yavYʰDb'U2y<`(cȩ]sW5k7}塿~i8AY׉IL͒;4g6u=whg1,J/GE엢3ܓϞ5'lg^qpMp@a{;m A0M#&V2'o)jvtMd1KXnnG'?h9&=SX/vV,?XXlZd :0 8Q+u(]_C2U# ҜsZz%)$UzƀmnV͑"H[8q3ZN@utL],NO(!5P_Оsԣy+ <$zM˥tٲަ6C5ypPe5ք+VX<Y [(Cql.rvOֈQ*M勿꼦|A{#5=gHq(gi?k׵uǏl^5HvWZhPUKr-A1 =xD85 0}ƱXaq!T+O\C4|Ao;t|OUyTҕK.M%@obe ߾ǭ\"LXM\R(q j DxĎ7MH3d= ~Etq!>YtW,9t`K/jo.[󗳵1a:&Y:oqگ>sܰtabP/"4P-4+n l_u\(OE^NӋ3p.wE+!醾,zcht K55[\A8;ڭinIm:mjEm}3_)CKx?3RNɉ$M&Neg)i9gT$Iڐ[c=N~7£ X'X#rݸ.-7%^J]&xbBG.H&ProT.8YtWS-M\Whu\LOznfP>?=.s:dBD\&'@9LIv3*BM<(+䁸<:9OymqkL/ hֈҟ^!&FGʗ;{$$)n2JIr Y- c1g=RRcmMmX/{,s0=G e@E؇=s_8KCNMzn(Wtﴅ#EMzڳ6!~- :uy<-A Y[niOWL Fc^R >T‰4}Jr99Ο#d1cADT@04=@ٻ=䄾=F1 ^Eq;Ja}ɂ!q)m&N HO5E3I_2;"GV;$גV8:6F1癛R0yR2r=yC9hd,rUٙV9`(յu``7OMt#5b}jcRajw5*䃽k7nܡeuXh]Xbu^\1YSSI $Ie[1 ? != $(\E003Cvu# Vj:BqߨDYL;! ڸ?\sML)=kV@<$Hy%Yq'P2YAJ?/w]2'-[>EW6ԊcU0"A8].e@g>SP\wW^~D$#j(+59Cmfc@wNT9,&YL.a`! URy=9 9LCpˀ;^-t\It:R"L A>4ksfhQFSِ3Scprl٥HG w5@ޗFt"?3ԁe>HS6*#HIuɧ nt8ׅlkBՔZFz7cWN=q]tt|$sS8tޟl*>y5-,>o(@flrTFs_y"&gk5+ݨ{KRQIX0OwĴ(OoV#0 F$0kJdHKY#d@ Oq9ѿY<5prZ#4ۢ#Ie4p59UUhjoڼɋ*G|5腺9"euo3LW#twvA6A/4cmB !IWh 3ca$M3nI O~,? 
c/ oy:N)!w7KUszߩv F2 9lo #d[&?a')˳MA׷Pz QTk1*6%1Y&/Gy6kd,{e-f ĝj$ 49:fä6M[:ٶK囬-EqʮCP82y*ȩ#qG, 'GY+ Lg$,<ۗ<ӷ])cpnU(8g?^|9LewG󵿔zH>`sHhpwLѭnŨLEɡٷgD7 $0d4Y+ @f]+0Ž@ Q!9Ew66{ʾ=[%Zi_zvMטaOudIiUĽFIi3[+BW*,?3̈́E#trkp/C&2\ddksKYasݪ(= 屘"i">{dG(e&DwEH84}Heu%!7{,Y~/f86tM):T0VYt$4pp: Aa[7rZN) Fǥ+eY_y=>oٺ%zk }*]Jc=S WYBf2T#H(\E@C䵸8?՞-|٩{}ѡ42FpJ_ 3ĞɹS.fu`X["iRaMƳbt));7 imO`ULKb?6m^{%J|@_j[c֋dh=[p$}Ko( %*YfޒH[FwIP)HxDtƙL sՇ/7D; LLDBbO\|Pb%ag a $r \i 3M1O%p5c]<BUq-ݺ^H!Käb[H'189XC3F@S'HqN76rleY;sUz fJ#vBTy)Noph"$0+<'tFP/24Ć I!ͮ)nݺ%({'$GJ5q6!1jEr^%A7=NEAA 0\rIgU~0dA#)\aH~m v#x`X0GF P׈‹nj Z^f~2J̕G cb.hrTD5hN?utH@o57 A2F"VFjL?q)8G D`9^ E?|{:kr0qw&*̓e2td@GK άW1Xf%3T;6BU픨2( 3Oi;"]1ojfN S^”TYita?%R88 D+_7ӆ+$yW`h=/y"^0%JUʣ@wH52Q`Ii(Zp8"gΧ1=b;FvLĘh(`K=͇kK8n iHgZcgܸah_CwV_.a,H&4#؜/1Z"izLKz Hz&jjB₝:e{N_O?Lhޡ sELL-D nfLB۶mMXJZs\VH LJa:sRCeg9N›gHeXG4Dщj e@*٦XآX}ۮCS?c3C1Ydq;t|Ji=42hźBGll Z勒#a(aѡSQ^_[E$D vhsφIJ'),OW.ZD(M[66֠1QZ(c>]^LAݤM~tfÄ lC =Ӹ0ac" DxDs xp&Z1+5u5-lT~2zb2Yz1\:y _1s~3-Ny? /!*ۍP%j*RzSP4 W,iy#T;a>Vg#Oj%LYtuT_5\TR9|%dYy%pɑ=o9=RW: yϟ=Γ^6>`-ˡ1AYyAx3ǮWG}>,<4L!8hcaT)CEd.JSNbC](3u4FGr`[QG1Rpp 0;/=tHa R! ZI2 !&&Šgׁ/]Fljz> Q**x[EMdP=mVФ:ʖfӡ* aZN<^5/^o*8ҹIGA0\amr&'HH% ,5m4A%Wi9['_ghT&9cƏ Bމ @Va(,9a)x8a嚵>)ouC's&N ~V_@ dc 0Jfs5^5 S K3iZArL]E][Ш>7*l΃š埇;x]+[1EzSr])B]+p?xĠM%p& *jåNSdž*阶>Ε-cG\,FT,S~Da޵HvԓO#bĂqɒfئm(GōlK'Dc2y+9N6Hn#Co4?9MЋE0*V PXaRfŽ静Rb߮c\G[orr`p0$İ~ʲ-0 bo{C%-IGuˌH}uEEuxY+;eZIbsTջCua!cCs8$DD;>+# JN'#-aVjjES0T#S ZsI߲#u*v+6vK8T'TBd+P#OClSS2Qe$skEEOJ[emFTh ,)V3݀I*h3kCC}ysEur3kBq">ޘ  ,aH -F{:HI~CMl'qHgΚ-%a5sb~#(B)_g1 aFi~TIۜ ;ZJ6 HwRVR8'9֊jvҪC>ŲSTa/P*YY-̳M|L*Q5])nS=Tf< 5RTGY)wjXʢҝ {4w2\-xˬ;V\tmH{:-=7e: j ?J؊ܭCS_0axԷT&?Hؔ6e"W!I״Sq{Z}yORFjQm3诃B2"}Q|A&,WKce~vl ?TRKJ\s I$'Q.R;':VkRbJxIsg -MtEax.na<^ k`?/jtCן+RXao:J+&p .Fies|P!;>ܲYCSaJҡ.OX)Ʃ uhZa%G@Zx>XMx/;ΑzHPa_Hè`vNܧ͇ $G)u#YgϚ.)wb&Թ wHfV9⽏kw(l%ϤqꓺiohY^&Xg+IaU=UQ? l!hһɊeP#93' i&zMTo[-`Iu#.{s? $"h!-JTqF܋㝿q$5>,]$lBL&%@ oQ=gvE*0_X?y?UƩ{:?kppjѳO?c7nA@A1Sap06g3RYA6WkE%49E2܈܏ngoeqћXF. z pOțϳȬY$R>'KhMpZ! 
NbQRz]- 3/X> K7dj%N9n!`_z2 8qHݻ8 gC,sB>q];ث)I:!jMOҒȿe\Ϊ0T҉8Ey!8:(1cyQ;޼qX,_E~g &M߻g ul8⠈GB|-)uW/R[U❋وյh_=qssU. zXحtX HOc1bv2>Gv sfٯ.鳀7DWUՍ^۝~QGJB&?y|jM79SO>맫Pm5-vt$ks4[|vxeE Lq8`< C$h@U1R/[߶{G9 xVWORi+/}Hn9gGI(,Y У ]lj= LZYxB);@z*oJGRF=u촽*wMgS57ySva;a\ ױlWx}*S-Dj qȮ'-/ʅS^۸h#eY8>m<ӾUku''r_x@7Jsݓ'OqNMsQ\ IM -B|┯{Ǚ/bY^ Cy /bm$}U;^h34m șSbތ$Hll PgNuTXk/}w~?7L3A^:XzpUy$H.]fjuPxG9cA]@# Lj~k[LI?R82N(GE+Z!MsWa?0/ck9L 4<ɓ~FUvy>̈*lӁkik  IDAT'sIY&I ޫX]Cp%'dYaHN:0ze8ISm]uT8 @@7bATH1C.wR-'ϙ(F0b뇿7etTa2yWk]x{$Mm"Xz:K*-zighmvcS~fG!@= ;e$f8io:1Avd;DuP PFh P8~L?ϯz-[A?ɪ?/m;MsH3N/|@UXPB<NvǬQ!]N?"\zEq$伬Psql l@OCGw-VV0o F& DYke0UZ`L76Aʚߏ\I$H1b3F=ٷ|Xϋ? S{O& LeǂO?~wo7mQlu=]t 7jNitM{ aӛ < =~@| 3Njx īޏBi[P䈋̗1ZFY}6,.KM顇ķb~XO/":zaBiC٘!<?/mb2-1>03T.] F7-%\ ڤiv ,eOjpqS=ʆ'}߲7I|Ÿa06Wc*+W^LdygYqL7,Hz!Dt k?kɢ&}Ӫ2B*CM!=< Ã|_er8y*6GЌ7m˖/P2X:8R`Vhil}z܆dY߯9y왧 NCɬzwOݲ?.m_=$ɟkkYsE֫ P,\ߪK:^ Qm(_ s/  cQ,<1]<2|( ̏d_ʗmݺuVYk105M#@n銕n>*-oT%M 41Oܭ<]e7 =27tK ;:+k_ъ/9PTCE/_ָڵ6oH@xf,Wشi3 C驳gl"WFIAU2 6xB0 s (q([*pRJld2u˫ A@gAs~ vyI$OG(a+;Fz?G]fE"Nb1r&L)A^%!#>2~//P0Wҏ,)\픒fƍc`!ߐxZp4"ď@@7rHz @ kC&x9z k뮹td@9 ѭt#6{172Ԉc\vKnfa;h:Ycoy->n*6C$bntK." ٲr-p]6l+r"j7Ξ5 }w6>2H1@c Îvb*/DQ~y"sdRbF c!^crDc|%[v6hs6\FqxBO`"G6p|taHAF`DX>m}N@FwYjEFnj3̰Cxy^3*9u.\z  Հ;'=#@\@A<'|3Uڎ׬UcI%GE.k%vѠ\slaQLR8sy쑼~2/Ǔ `2{ǒƕSr;@D=xxr&l ܫ鉃׮7|kl㵲O{|c~]kɼ\oC| ['208RzG?@_#q6Luƀ0٬:UpU?dҖmzS)?P@ҡH:mߦA+!L|o6vlcľbMZق-g.AU$cF)?(h~٬FwD{%>޿@-MM[d-[ܗҿRH@^wfuÇiۮλ[N, vʎ&#I+r%i3[TlV/D'/+,(>[T3)ex9C1uD8XLru@Rm~Kd~WhG |o֞NVx7D8r:if[qs*۟2u-]Ė)#H]{,Obh+#4FɦlvSQo{qn{?`/dQ~.d=ϣ6p ׸Fŧ?){饗m:,p13wͣSqsf ^@k;95 Qp08t;B 6*|Gd;a @gπe쥋8Ԇdx'*g>V,9DI H'&QVGFp8ʹsOdxosp[! 
ÿ~ 9H8cE<0Uy6CT/qNrVg8_@7OOK伻=G@($0g$%nr+|b܌~;m8{ӱc'I7 xM?aoɓTC&v7Y]\9Sc<^Ư_RŖB ħpaH Ynqt`L0р-J8a#%~ϝ;ߞ{} #{at0 r6|9NnHo( S5jLJf[&'w2?y7’Rba:5kHIC<dScڵ ek[$x) i L?Q_| '>DA)$#xA9{ɯ V@aFe^~-{R|VFefh(.1xY}1:[@W>Eϱ<9᥽3jH4"Fklj۶3ND|x̵ڻמE[=؏9ptՑs#NﴱEK6Yy,HgJJ9 !jywnjeY?Ok&eGeЀbHnw1ARi5Am!W_Y>ao|ѯj [lZlh veYsP <{md.2e^̨uL Qq(ihrE^qx@֕V@PˠRFyxD 4uVZP!s1pnf˃T c[O}Ȝ&,Ss@%.MK#qSy1ʅu$1_Gz۹iy6ۺu(O=nW_xZIѾ!%3F[y|J4C{<S.stEѻFc|Y5@2dr` rZ@1 .!ef4p npm+rY j0 *ďxfKt圗PEf7J^_q .j7ߨ#%0%< 3}5*vQ1yr Sgp9--$vkC[{Hq1)P9k?z(?+Ivp(Ja̓bl0B<$/Nה O2E`U*/Nw8ب:cBc|6~4R&-efcݘ%K_llOo{*ҡ22 9A /j-/ăO؋gb&hW\N*.\ ܵ&b{B/8R\o$bw1A|ʮw+Ta%&MJs簫ltnpz'rpjqhwu[Ul۶ժV{peevUL73+) mG{#ƍ(:!Gډǝ|L{Μ:m{d3列{3@/@S^ =JisJԮo#5 P PQ$AǺ_-]۟J|[`8!'! "1}uu 4Nvrp^A>p28|bgcw7mf;"U?-`+c Щ[(v]m=bFFˬvE {̿lH@ʶ4omP7w,ɒ. $6b2h*MT+D;?_րSHx:v.yH)M޲J6@b3 y 7j&۩"M_4j<{!WgoUhh̝cӴf-NS2:%l]=}ݧDr|l*>+ԻpB"klmq9. @8LXG=Qua?(q{oتկ"(Ba,rtGWu<+g x,GHcB`Dq"DW|re:$ mAR,k@+Qk=D^iJ%2%\s̮={T*ZGα&Mt8Y}qMFm[;V.=^A, qs.|;"zeqxh٠ /f7(_"\x-irN)NIU"cc X2'A3͛6i/]ʏN\\i0@α .q~ljҪߩw =d Jr]G1q멪/jZ'CHh/i#*sL 9tFaÈ|QF%o8OHb>tp5!]`U'i)漢~mngnjr.=/_ܫ}9ɾRˁDAW&iOr6 z\ Gx-7w5X|qk"@@C4~=CyH0 / b _aMuv.DQFh 6gZ(G5Gɦb{cqgsƞ={n}Uj1skayh3M-\폢.28fƕZs 4jy7?KjI}v7 +mn'e:AKfG?{INТ> cޏL pq)8ɋK $eF96I_wP IDATh^ŁSX^%d;g5o1c2E~VhzEߙo}bE >HJc{jK_bCJ?d_OWq32i欰7_QjS1+pjUti͛Ƙ467ʼP!q $,3ќV'qx䨑r:^9{{F[3)RЌ\dƴ:XF@}$bs;JL@4Wƞׯ}RH˧RNbYK$be^06 o93=1b1, 4$Wᙩ(#4-d>_:tH yh_A1fq9ZhU{ߐ΂LySylN8~l.ʞ{Vyf)$[L$j"qҡ1{ő3grweyQ+|>O9J R Nkp<_gNuM}蒥`6ǎ\),qsk?>(S<Ìb9O҉c+ 1rJ 5MBYAjW27# (33%Rc>4M8wJƜs u<&| $/wQ%Cqgi3Y>wsmXbB~0$wkn& Yq:GqgexҞ<06xܹoR&EMbQbz}*iN5O_R#՞@<5sր3* 3CؒQa?:]XO},1&&׾7yH.8:JcO I"9Mmix|kg] p3x o(_;曛K)۲ȉCD<".ƵMFvm۱~;S2"`mN wmUʡZ*y\.xɋ^~zGYacb񢅋˛f} +ɼ0R>I8ה4OW?Q#KM݉SxsP K9 ZO'O#ؤnLx~G)v-{n8hFUy͘Q/p!DpIz zg>cq _ 1纃y1M?g짩Qhq)ªl7TϓqӦMu|r'k~xP>?{T%'TH2*0Q jgX{|-7< uܧ}b0KJ&;jǽqҀeqP`hsmIy2 v*ppR6sg*TW(@rX~F[|P'rR9=i'=qrQ;믻^{ߵm4jM&Drb+Yg&G3gEt@E i۴F~nK+3|GtS}@S 6ͷ|@=uJysLkoymٶELsypq,G.(N"AF|8a/m4,=w{c\z(@2{!nNLmoys<*eX}@"wP0u0#P14*H6$Dl*khmop9"&0X:I3HJ\fgDQ~ǻd7<&,1Wlds}]yț%K]Ͻ}|dԤɄ2 E@R 
ףF2[([gVFk9ub"H7oC-1}C O-)k͛J)@"C:IU HGfi|.]"mp8̚8 ұEp "VԴ[GaSiH5";p 5pi-(B)HR$]A5dpS#iȹB Q sB38G\ ۵6;sCfy)S\b:GdyC SlRu:& @G"A\9SbLWaUAt\y5 nDU/MӠ{/X93Cc[z}Cr1NJ&BsS ĥ(@2^) %|w)Wcy>y8d?J @v-v뭷~oo+PN$0(PLN!Q`@@L\b5k۶v:mk= ގ*}i Ѱa@[n@7ǧD(@'ʤQ[{][jmڻ{0 q0A&IAα׬;~oށ?KҔ8@.=N:!!mF@vΏ?a^hň#Y]!ͺhgæFq")ۈR92_ $/Rnxν5U/h[lv#GQqΩSH-eoZ4:T'[a1Fx X?Z̆{׻YS]9bb$u:&  $K,"0ƛ\Qcy֭[lͶl>~EquIKӕ= ]w<r̘6AwTm+% K}tL/HR)y(" a) pNNƖn#2*IuuP.(]r}KTJw(Vtޒ j:FA>,;hZMVw*(Q`(~f3O@ pѪ;9 =j8QH:rVfm:Vfm[[}nQRIY3&y.9rY@%(&g*ATG7 N3g9NMcdZ#G߰V(Y;m RY;SH$HVک.@z#vwV@7;#CǣOS 䠓8UPJ n8-w1tErH޺W|;5/Y(Pi $4S}Rz /FT ̤"AM?A~ݤ5s/D R d +gd7]#Fs&6(:e_|ҹKW,_ϐ~*@ rrlQ%j/êXo-@ҥ ơ "tr vbPN@)r۩b4DH我Njk뜜.2׸b8ew@ 5Z}ⰵUGB@sJ$EHʏ?hE bpKlmTCW̫dCԖW \% Rsx+ospl5B;h7@5l$ssG}\e\-vTWuL-IvCkīXrd"ǁNs}O'q07$fm؁{EmDNm@p^S=yI(6.)N>N~F/ {{:GAe3j#N?C $̫؃~O\C% wV XJMwPZ۞3^+ip@P$]r'r+i{L$o'"1N'">p~7Pnf@7[}4+Vى/ yLpZA|pl}=! ]I"8;K]-}nRiˎ>|+>h1y+b9mD"Hi2 tb._;a]h(.QDG\ Z;loRHEΑsDit5@T-vTJPZ8a>1Z~A3tvA(2VܬR3>ݿvJ54$H@Ii )yͪUK}xFgm8-wS\\&!Wܱ^#lPzu1)=M3NPs88EŋdҽW"2Ɋ}7VX]5k6Vr"mrGٞ~َ>ݹiq5|T@P rm/8HՊy6Cѷ׸t%l >v8(tss3G_f .:fXaI$kշda>79pRvWP:]~{vٱd@xԍ >hu#^WH 9@OJ][;' .cN$3܎.:lCzDd h)S7pf;-GPPu/jT*p]n`OpS+eUcFٰsk?q:myy3Fk liI N#GCU=[g[Oɹ#ZN G# t:xW ÜgZ[s\/SNEsY:.\2~kV5Q7Tť0(CӣFjk:%"ژYˎm?:'8TMt,9^~`HHVuUpG6sæ_wa%+dW~FaMR6DuzhY$SumMlYʹF8RFhSHS qyjsj](:WPh۽3Ose KsKIG^8?6sq<*Z6JvxyTSc(>\\Z݃%P63h#W\琧#=RH(Ce2ԣgv85U_\NdS t6=o2+1GLqyqkwO٩WwɦTVB A@[*̅t,cոZ{CM`nE6zIePRkó()@2R"QyI:8,>k?!-IDAT%?έ?+y `S:N H(ImĜi6&.s]Mf?5V%PO8gGݰ:ϝ֣Z9J5WиvU6zNQ7K ąbPn;𜒸io 9 $# 08U9>pEN Dn2Jɝ-^$9|BlJ\T]^G^geУ ʶ,>˧j"p^u~ ioX gRJ3ysO!(DR leõu8A9ȁ^2ܖ$N&,VV8RH]Y4:uzb"<ˁG`!:k}G~^/7Y-6YȖN0=|!=zw $Ai3Bk G͖vO-J)N0E+[2XD.H 2p|W28N[,Q# RuV&U p9W8&E^O!Q,!H P! 
dӋiIA^'En1J"@q\|x98%̹Ev:'$B@@Ht,P0 J^Ep8lc7`>l X@ ١yL4.blLxzJ/[r9B"?!\0B@@z@8V@@@z@^n% $ $ $L} Q Q Q eIENDB`uqfoundation-pathos-33e3f91/docs/source/pathos.rst000066400000000000000000000041651467657623600223750ustar00rootroot00000000000000pathos module documentation =========================== abstract_launcher module ------------------------ .. automodule:: pathos.abstract_launcher .. :exclude-members: + connection module ----------------- .. automodule:: pathos.connection .. :exclude-members: + core module ----------- .. automodule:: pathos.core .. :exclude-members: + helpers module -------------- .. toctree:: :titlesonly: :maxdepth: 2 helpers .. automodule:: pathos.helpers .. :exclude-members: + hosts module ------------ .. automodule:: pathos.hosts .. :exclude-members: + maps module ----------- .. automodule:: pathos.maps .. :exclude-members: + mp_map module ------------- .. automodule:: pathos.mp_map .. :exclude-members: + multiprocessing module ---------------------- .. automodule:: pathos.multiprocessing .. :exclude-members: + parallel module --------------- .. automodule:: pathos.parallel .. :exclude-members: + pools module ------------ .. automodule:: pathos.pools .. :exclude-members: + portpicker module ----------------- .. automodule:: pathos.portpicker .. :exclude-members: + pp module --------- .. automodule:: pathos.pp .. :exclude-members: + pp_map module ------------- .. automodule:: pathos.pp_map .. :exclude-members: + profile module -------------- .. automodule:: pathos.profile .. :exclude-members: + python module ------------- .. automodule:: pathos.python .. :exclude-members: + secure module ------------- .. toctree:: :titlesonly: :maxdepth: 2 secure .. automodule:: pathos.secure .. :exclude-members: + selector module --------------- .. automodule:: pathos.selector .. :exclude-members: + serial module ------------- .. automodule:: pathos.serial .. :exclude-members: + server module ------------- .. automodule:: pathos.server .. 
:exclude-members: + threading module ---------------- .. automodule:: pathos.threading .. :exclude-members: + util module ----------- .. automodule:: pathos.util .. :exclude-members: + xmlrpc module ------------- .. toctree:: :titlesonly: :maxdepth: 2 xmlrpc .. automodule:: pathos.xmlrpc .. :exclude-members: + uqfoundation-pathos-33e3f91/docs/source/scripts.rst000066400000000000000000000003721467657623600225620ustar00rootroot00000000000000pathos scripts documentation ============================ pathos_connect script --------------------- .. automodule:: _pathos_connect .. :exclude-members: + portpicker script ----------------- .. automodule:: _portpicker .. :exclude-members: + uqfoundation-pathos-33e3f91/docs/source/secure.rst000066400000000000000000000005441467657623600223620ustar00rootroot00000000000000pathos.secure module documentation ================================== connection module ----------------- .. automodule:: pathos.secure.connection .. :exclude-members: + copier module ------------- .. automodule:: pathos.secure.copier .. :exclude-members: + tunnel module ------------- .. automodule:: pathos.secure.tunnel .. :exclude-members: + uqfoundation-pathos-33e3f91/docs/source/xmlrpc.rst000066400000000000000000000002411467657623600223730ustar00rootroot00000000000000pathos.xmlrpc module documentation ================================== server module ------------- .. automodule:: pathos.xmlrpc.server .. 
:exclude-members: + uqfoundation-pathos-33e3f91/examples/000077500000000000000000000000001467657623600177255ustar00rootroot00000000000000uqfoundation-pathos-33e3f91/examples/README000066400000000000000000000001301467657623600205770ustar00rootroot00000000000000Notes: ------ test_mpmap_dill: has patch to enable dill override of pickle in python3 uqfoundation-pathos-33e3f91/examples/async_map.py000066400000000000000000000042421467657623600222530ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE import time import sys def busy_add(x,y, delay=0.01): for n in range(x): x += n for n in range(y): y -= n time.sleep(delay) return x + y def busy_squared(x): import random time.sleep(0.01*random.random()) return x*x def squared(x): return x*x def quad_factory(a=1, b=1, c=0): def quad(x): return a*x**2 + b*x + c return quad square_plus_one = quad_factory(2,0,1) def test_ready(pool, f, maxtries, delay): print(pool) print("y = %s(x1,x2)" % f.__name__) print("x1 = %s" % str(x[:10])) print("x2 = %s" % str(x[:10])) print("I'm sleepy...") args = f.__code__.co_argcount kwds = f.__defaults__ args = args - len(kwds) if kwds else args if args == 1: m = pool.amap(f, x) elif args == 2: m = pool.amap(f, x, x) else: msg = 'takes a function of 1 or 2 required arguments, %s given' % args raise NotImplementedError(msg) tries = 0 while not m.ready(): if not tries: print("Z", end='') time.sleep(delay) tries += 1 if (tries % (len(x)*0.01)) == 0: print('z', end='') sys.stdout.flush() if tries >= maxtries: print("TIMEOUT") break print("") y = m.get() print("I'm awake") print("y = %s" % str(y[:10])) if __name__ == '__main__': x = list(range(500)) delay = 0.01 maxtries = 200 f = busy_add 
#f = busy_squared #f = squared #from pathos.pools import ProcessPool as Pool #from pathos.pools import ThreadPool as Pool from pathos.pools import ParallelPool as Pool #from pathos.helpers import freeze_support, shutdown #freeze_support() pool = Pool(nodes=4) test_ready( pool, f, maxtries, delay ) # shutdown pool.close() pool.join() pool.clear() # EOF uqfoundation-pathos-33e3f91/examples/map_pool.py000066400000000000000000000022511467657623600221050ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @uqfoundation) # Copyright (c) 2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE from pathos.maps import Imap from pathos.pools import ProcessPool squared = lambda x:x*x # serial map (in-line for loop) print("list(map(squared, range(4))): %s" % list(map(squared, range(4)))) # pathos serial map _map = Imap() print("list(Imap()(squared, range(4))): %s" % list(_map(squared, range(4)))) # pathos process-parallel map _map = Imap(ProcessPool) print("list(Imap(ProcessPool)(squared, range(4))): %s" % list(_map(squared, range(4)))) # pathos pool-based parallel map pool = ProcessPool() print("list(ProcessPool().imap(squared, range(4))): %s" % list(pool.imap(squared, range(4)))) # pathos asynchronous parallel map result = pool.amap(squared, range(4)) print("ProcessPool().amap(squared, range(4)).get(): %s" % result.get()) # pathos thread-parallel map from pathos.pools import ThreadPool tpool = ThreadPool() print("list(ThreadPool().imap(squared, range(4))): %s" % list(tpool.imap(squared, range(4)))) uqfoundation-pathos-33e3f91/examples/mp_class_example.py000066400000000000000000000033541467657623600236200ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Nick Rhinehart (nrhineha @cmu) # Copyright (c) 2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. 
# License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE from pathos.pools import ProcessPool, ThreadPool import logging log = logging.getLogger(__name__) class PMPExample(object): def __init__(self): self.cache = {} def compute(self, x): self.cache[x] = x ** 3 return self.cache[x] def threadcompute(self, xs): pool = ThreadPool(4) results = pool.map(self.compute, xs) return results def processcompute(self, xs): pool = ProcessPool(4) results = pool.map(self.compute, xs) return results def parcompute_example(): dc = PMPExample() dc2 = PMPExample() dc3 = PMPExample() dc4 = PMPExample() n_datapoints = 100 inp_data = range(n_datapoints) r1 = dc.threadcompute(inp_data) assert(len(dc.cache) == n_datapoints) r2 = dc2.processcompute(inp_data) assert(len(dc2.cache) == 0) assert(r1 == r2) r3 = ProcessPool(4).map(dc3.compute, inp_data) r4 = ThreadPool(4).map(dc4.compute, inp_data) ProcessPool.__state__.clear() ThreadPool.__state__.clear() assert(r4 == r3 == r2) assert(len(dc3.cache) == 0) assert(len(dc4.cache) == n_datapoints) log.info("Size of threadpooled class caches: {0}, {1}".format(len(dc.cache), len(dc4.cache))) log.info("Size of processpooled class caches: {0}, {1}".format(len(dc2.cache), len(dc3.cache))) if __name__ == '__main__': logging.basicConfig() log.setLevel(logging.INFO) parcompute_example() uqfoundation-pathos-33e3f91/examples/nested.py000066400000000000000000000017411467657623600215640ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2015-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE def g(x): import random return int(x * random.random()) def h(x): return sum(tmap(g, x)) def f(x,y): return x*y x = range(10) y = range(5) if __name__ == '__main__': from pathos.helpers import freeze_support, shutdown freeze_support() from pathos.pools import ProcessPool, ThreadPool amap = ProcessPool().amap tmap = ThreadPool().map print(amap(f, [h(x),h(x),h(x),h(x),h(x)], y).get()) def _f(m, g, x, y): return sum(m(g,x))*y print(amap(_f, [tmap]*len(y), [g]*len(y), [x]*len(y), y).get()) from math import sin, cos print(amap(tmap, [sin,cos], [x,x]).get()) shutdown() # EOF uqfoundation-pathos-33e3f91/examples/nested2.py000066400000000000000000000021151467657623600216420ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2015-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE def g(x): import random return int(x * random.random()) def h(x): return sum(tmap(g, x)) def f(x,y): return x*y x = range(10) y = range(5) if __name__ == '__main__': from pathos.helpers import freeze_support, shutdown freeze_support() from pathos.pools import ProcessPool, ThreadPool from pathos.maps import Map, Amap amap = Amap(ProcessPool)#, close=True, join=True, clear=True) tmap = Map(ThreadPool, close=True, join=True, clear=True) print(amap(f, [h(x),h(x),h(x),h(x),h(x)], y).get()) def _f(m, g, x, y): return sum(m(g,x))*y print(amap(_f, [tmap]*len(y), [g]*len(y), [x]*len(y), y).get()) from math import sin, cos print(amap(tmap, [sin,cos], [x,x]).get()) shutdown() # EOF uqfoundation-pathos-33e3f91/examples/pp_map.py000077500000000000000000000051541467657623600215630ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ example of using the 'raw' distributed parallel mapper To run: python pp_map.py """ from pathos.pools import ParallelPool as Pool pool = Pool() if __name__ == '__main__': def add(x, y, z): """Add three values""" return x + y + z def busybeaver(x): """This can take a while""" for num in range(1000000): x = x + num return x # Immediate evaluation example import time start = time.time() results = pool.map(busybeaver, range(10)) print('Time to queue the jobs: %s' % (time.time() - start)) start = time.time() # Casting the ppmap generator to a list forces each result to be # evaluated. When done immediately after the jobs are submitted, # our program twiddles its thumbs while the work is finished. 
print(list(results)) print('Time to get the results: %s' % (time.time() - start)) # Delayed evaluation example start = time.time() results = pool.imap(busybeaver, range(10)) print('Time to queue the jobs: %s' % (time.time() - start)) # In contrast with the above example, this time we're submitting a # batch of jobs then going off to do more work while they're # processing. Maybe "time.sleep" isn't the most exciting example, # but it illustrates the point that our main program can do work # before ppmap() is finished. Imagine that you're submitting some # heavyweight image processing jobs at the beginning of your # program, going on to do other stuff like fetching more work to # do from a remote server, then coming back later to handle the # results. time.sleep(5) start = time.time() print(list(results)) print('Time to get the first results: %s' % (time.time() - start)) # Built-in map example print(list(map(add, [1, 2, 3], [4, 5, 6], [7, 8, 9]))) # Trivial ppmap tests for i in range(10): print('-' * 30) start = time.time() print(pool.map(add, [1, 2, 3], [4, 5, 6], [7, 8, 9])) print('Iteration time: %s' % (time.time() - start)) # Heavier ppmap tests for i in range(10): print('-' * 30) start = time.time() print(pool.map(busybeaver, range(10))) print('Iteration time: %s' % (time.time() - start)) # cleanup pool.clear() uqfoundation-pathos-33e3f91/examples/secure_copy.py000066400000000000000000000036201467657623600226200ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ example of using the secure copy interface To run: python secure_copy.py """ from pathos.secure import Copier, Pipe if __name__=='__main__': source0 = 'test.txt' source1 = '~/test.txt' source2 = '~/result.txt' dest0 = source1 dest1 = source2 dest2 = '.' cpu1 = 'localhost' cpu2 = 'localhost' #cpu1 = 'computer.cacr.caltech.edu' #cpu2 = 'foobar.danse.us' del1 = 'rm '+source1 del2 = 'rm '+source2 copier = Copier('LauncherSCP') print('creating %s' % source0) f = open(source0,'w') f.write('Test Successful!\n') f.close() from time import sleep sleep(1) #FIXME: needs time to work... print('executing {scp %s %s:%s}' % (source0,cpu1,dest0)) copier(source=source0, destination=cpu1+':'+dest0) copier.launch() sleep(1) #FIXME: needs time to work... print('executing {scp %s:%s %s:%s}' % (cpu1,source1,cpu2,dest1)) copier(source=cpu1+':'+source1, destination=cpu2+':'+dest1) copier.launch() sleep(1) #FIXME: needs time to work... print('executing {scp %s:%s %s}' % (cpu2,source2,dest2)) copier(source=cpu2+':'+source2, destination=dest2) copier.launch() sleep(1) #FIXME: needs time to work... print('cleanup temporary files...') import os os.remove(source0) launcher = Pipe('cleanup') launcher(command=del1, host=cpu1, background=True) launcher.launch() launcher(command=del2, host=cpu2, background=True) launcher.launch() # print('cleanup result file...') # os.remove("."+os.sep+os.path.basename(source2)) # End of file uqfoundation-pathos-33e3f91/examples/secure_hello.py000066400000000000000000000021761467657623600227560ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ example of using the secure launch interface To run: python secure_hello.py """ from pathos.secure import Pipe if __name__ == '__main__': # test command and remote host command1 = 'echo "hello from..."' command2 = 'hostname' #command3 = 'sleep 5' #XXX: buggy? #command3 = '' #XXX: buggy ? rhost = 'localhost' #rhost = 'computer.cacr.caltech.edu' #rhost = 'foobar.danse.us' launcher = Pipe('LauncherSSH') launcher(command=command1, host=rhost, background=False) launcher.launch() print(launcher.response()) launcher(command=command2, host=rhost, background=False) launcher.launch() print(launcher.response()) #launcher(command=command3, host=rhost, background=False) #launcher.launch() #print(launcher.response()) # End of file uqfoundation-pathos-33e3f91/examples/simple_tunnel.py000066400000000000000000000015471467657623600231640ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ example of building a simple ssh-tunnel To run: python simple_tunnel.py """ from pathos.secure import Tunnel if __name__ == '__main__': import sys rhost = 'localhost' #rhost = 'foobar.danse.us' #rhost = 'computer.cacr.caltech.edu' rport = 23 t = Tunnel('Tunnel') lport = t.connect(rhost, rport) print('SSH Tunnel to: %s' % rhost) print('Remote port: %s' % rport) print('Local port: %s' % lport) print('Press to disconnect') sys.stdin.readline() t.disconnect() uqfoundation-pathos-33e3f91/examples/spawn.py000066400000000000000000000016271467657623600214350ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ demonstrate pathos's spawn2 function """ from pathos.util import spawn2, _b, _str if __name__ == '__main__': import os def onParent(pid, fromchild, tochild): s = _str(fromchild.readline()) print(s, end='') tochild.write(_b('hello son\n')) tochild.flush() os.wait() def onChild(pid, fromparent, toparent): toparent.write(_b('hello dad\n')) toparent.flush() s = _str(fromparent.readline()) print(s, end='') os._exit(0) spawn2(onParent, onChild) # End of file uqfoundation-pathos-33e3f91/examples/sum_primesX.py000077500000000000000000000063151467657623600226220ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ Calculate the sum of all primes below given integer n. Usage: python sum_primesX.py [tunnelport] [tunnelport] - the port number(s) of the local ssh tunnel connection, if omitted no tunneling will be used. To establish a ssh-tunneled server, please see $ pathos_connect --help """ import math import sys import ppft LOCAL_WORKERS = 'autodetect' #XXX: 'autodetect' or 0,1,2,... def isprime(n): """Returns True if n is prime and False otherwise""" if not isinstance(n, int): raise TypeError("argument passed to is_prime is not of 'int' type") if n < 2: return False if n == 2: return True max = int(math.ceil(math.sqrt(n))) i = 2 while i <= max: if n % i == 0: return False i += 1 return True def sum_primes(n): """Calculates sum of all primes below given integer n""" return sum([x for x in range(2, n) if isprime(x)]) ######################################################################## print("""Usage: python sum_primesX.py [tunnelport] [tunnelport] - the port number(s) of the local ssh tunnel connection, if omitted no tunneling will be used.""") ppservers = [] for i in range(1,len(sys.argv)): tunnelport = int(sys.argv[i]) ppservers.append("localhost:%s" % tunnelport) ppservers = tuple(ppservers) # Creates jobserver with automatically detected number of workers job_server = ppft.Server(ppservers=ppservers) # Allow running without local workers if LOCAL_WORKERS != 'autodetect': job_server.set_ncpus(LOCAL_WORKERS) #print("Known servers: [('local',)] %s %s" % (job_server.ppservers,job_server.auto_ppservers)) print("Known servers: [('local',)] %s" % (job_server.ppservers)) print("Starting ppft with %s local workers" % job_server.get_ncpus()) # Submit a job of calulating sum_primes(100) for execution. 
# sum_primes - the function # (100,) - tuple with arguments for sum_primes # (isprime,) - tuple with functions on which function sum_primes depends # ("math",) - tuple with module names which must be imported before # sum_primes execution # Execution starts as soon as one of the workers will become available ###job1 = job_server.submit(sum_primes, (100, ), (isprime, ), ("math", )) # Retrieves the result calculated by job1 # The value of job1() is the same as sum_primes(100) # If the job has not been finished yet, execution will # wait here until result is available ###result = job1() ###print("Sum of primes below 100 is %s" % result) # The following submits 8 jobs and then retrieves the results inputs = (100000, 100100, 100200, 100300, 100400, 100500, 100600, 100700) jobs = [(input, job_server.submit(sum_primes, (input, ), (isprime, ), ("math", ))) for input in inputs] for input, job in jobs: print("Sum of primes below %s is %s" % (input, job())) job_server.print_stats() # Parallel Python Software: http://www.parallelpython.com uqfoundation-pathos-33e3f91/examples/test_mpmap.py000066400000000000000000000016511467657623600224530ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE def host(id): import socket return "Rank: %d -- %s" % (id, socket.gethostname()) if __name__ == '__main__': from pathos.helpers import freeze_support, shutdown freeze_support() from pathos.pools import ProcessPool as Pool pool = Pool() print("Evaluate 5 items on 2 proc:") pool.ncpus = 2 res3 = pool.map(host, range(5)) print(pool) print('\n'.join(res3)) print('') print("Evaluate 5 items on 10 proc:") pool.ncpus = 10 res5 = pool.map(host, range(5)) print(pool) print('\n'.join(res5)) shutdown() # end of file uqfoundation-pathos-33e3f91/examples/test_mpmap2.py000077500000000000000000000020461467657623600225370ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE def host(id): import socket return "Rank: %d -- %s" % (id, socket.gethostname()) if __name__ == '__main__': from pathos.pools import ThreadPool as TPool tpool = TPool() print("Evaluate 10 items on 1 thread") tpool.nthreads = 1 res3 = tpool.map(host, range(10)) print(tpool) print('\n'.join(res3)) print('') print("Evaluate 10 items on 2 threads") tpool.nthreads = 2 res5 = tpool.map(host, range(10)) print(tpool) print('\n'.join(res5)) print('') print("Evaluate 10 items on ? threads") tpool.nthreads = None res9 = tpool.map(host, range(10)) print(tpool) print('\n'.join(res9)) print('') tpool.clear() # end of file uqfoundation-pathos-33e3f91/examples/test_mpmap3.py000077500000000000000000000026101467657623600225350ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. 
# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE # pickle fails for nested functions def adder(augend): zero = [0] def inner(addend): return addend+augend+zero[0] return inner # build from inner function add_me = adder(5) # build from lambda functions squ = lambda x:x**2 if __name__ == '__main__': from pathos.helpers import freeze_support, shutdown freeze_support() from pathos.pools import ProcessPool as Pool from pathos.pools import ThreadPool as TPool pool = Pool() tpool = TPool() # test 'dilled' multiprocessing for inner print("Evaluate 10 items on 2 proc:") pool.ncpus = 2 print(pool) print(pool.map(add_me, range(10))) print('') # test 'dilled' multiprocessing for lambda print("Evaluate 10 items on 4 proc:") pool.ncpus = 4 print(pool) print(pool.map(squ, range(10))) print('') # test for lambda, but with threads print("Evaluate 10 items on 4 threads:") tpool.nthreads = 4 print(tpool) print(tpool.map(squ, range(10))) print('') # shutdown all cached pools shutdown() # end of file uqfoundation-pathos-33e3f91/examples/test_mpmap_dill.py000077500000000000000000000030241467657623600234560ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE import dill import pickle #XXX: multiprocessing needs cPickle + copy_reg dumps = pickle._dumps loads = pickle._loads # pickle fails for nested functions def adder(augend): zero = [0] def inner(addend): return addend+augend+zero[0] return inner # test the pickle-ability of inner function add_me = adder(5) pinner = dumps(add_me) p_add_me = loads(pinner) assert add_me(10) == p_add_me(10) # pickle fails for lambda functions squ = lambda x:x**2 # test the pickle-ability of inner function psqu = dumps(squ) p_squ = loads(psqu) assert squ(10) == p_squ(10) if __name__ == '__main__': from pathos.helpers import freeze_support freeze_support() from pathos.pools import _ProcessPool as Pool pool = Pool() # if pickle works, then multiprocessing should too print("Evaluate 10 items on 2 proc:") pool.ncpus = 2 p_res = pool.map(add_me, range(10)) print(pool) print('%s' % p_res) print('') # if pickle works, then multiprocessing should too print("Evaluate 10 items on 4 proc:") pool.ncpus = 4 p2res = pool.map(squ, range(10)) print(pool) print('%s' % p2res) print('') # shutdown the pool pool.close() # end of file uqfoundation-pathos-33e3f91/examples/test_ppmap.py000077500000000000000000000015031467657623600224550ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE from pathos.parallel import stats from pathos.parallel import ParallelPool as Pool pool = Pool() def host(id): import socket return "Rank: %d -- %s" % (id, socket.gethostname()) print("Evaluate 10 items on 1 cpu") pool.ncpus = 1 res3 = pool.map(host, range(10)) print(pool) print('\n'.join(res3)) print(stats()) print("Evaluate 10 items on 2 cpus") pool.ncpus = 2 res5 = pool.map(host, range(10)) print(pool) print('\n'.join(res5)) print(stats()) pool.clear() # end of file uqfoundation-pathos-33e3f91/examples/test_ppmap2.py000077500000000000000000000014461467657623600225450ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE from pathos.parallel import stats from pathos.parallel import ParallelPool as Pool pool = Pool() def host(id): import socket import time time.sleep(1.0) return "Rank: %d -- %s" % (id, socket.gethostname()) print("Evaluate 10 items on 2 cpus") #FIXME: reset lport below pool.ncpus = 2 pool.servers = ('localhost:5653',) res5 = pool.map(host, range(10)) print(pool) print('\n'.join(res5)) print(stats()) print('') pool.clear() # end of file uqfoundation-pathos-33e3f91/examples/test_profile.py000066400000000000000000000042701467657623600230010ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ demonstrates use of the pathos profiler inspired by: http://stackoverflow.com/a/32522579/4646678 """ from pathos.helpers import mp import time import random from pathos.profile import * if __name__ == '__main__': config = dict(gen=process_id) #@profiled(**config) def _work(i): x = random.random() time.sleep(x) return (i,x) work = profiled(**config)(_work) """ # create a profiling pool mpPool = profiling(mp.Pool) pool = mpPool(10) #XXX: ALT: pool = mp.Pool(10, enable_profiling) for i in pool.imap_unordered(work, range(100)): print(i) """ enable_profiling() """ # profile the work (not the map internals) in the main thread for i in map(work, range(-10,0)): print(i) """ """ # profile the map (but not the work, which profiles as thread.lock methods) pool = mp.Pool(10) _uimap = profiled(**config)(pool.imap_unordered) for i in _uimap(_work, range(-10,0)): print(i) """ """ # profile the map, with work profiled in another thread pool = mp.Pool(10) _uimap = profiled(**config)(pool.imap_unordered) for i in _uimap(work, range(-10,0)): print(i) # deactivate all profiling disable_profiling() # in the main thread tuple(_uimap(disable_profiling, range(10))) # in the workers for i in _uimap(work, range(-20,-10)): print(i) """ # activate profiling, but remove profiling from the worker enable_profiling() for i in map(not_profiled(work), range(-30,-20)): print(i) # print stats for profile of 'import math' in another process def import_ppft(*args): import ppft import pathos.pools as pp pool = pp.ProcessPool(1) profile('cumulative', pipe=pool.pipe)(import_ppft) pool.close() pool.join() pool.clear() # EOF uqfoundation-pathos-33e3f91/examples/xmlrpc_server.py000066400000000000000000000023431467657623600231740ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. 
# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ example of building a simple xmlrpc server and proxy, and then demonstrate the handling of a few basic requests To run: python xmlrpc_server.py To stop: < Ctrl+C > """ from pathos.xmlrpc import XMLRPCServer from pathos import logger logger(level=20, name='pathos.xmlrpc') # logging.INFO logger(level=20, name='pathos.selector') # logging.INFO if __name__ == '__main__': import os, time import xmlrpc.client as client s = XMLRPCServer('', 0) print('port=%d' % s.port) port = s.port pid = os.fork() if pid > 0: #parent def add(x, y): return x + y s.register_function(add) s.activate() #s._selector._info.activate() s.serve() else: #child time.sleep(1) s = client.ServerProxy('http://localhost:%d' % port) print('1 + 2 = %s' % s.add(1, 2)) print('3 + 4 = %s' % s.add(3, 4)) # End of file uqfoundation-pathos-33e3f91/examples2/000077500000000000000000000000001467657623600200075ustar00rootroot00000000000000uqfoundation-pathos-33e3f91/examples2/README000066400000000000000000000021441467657623600206700ustar00rootroot00000000000000Notes: ----- Contains two "extended" examples: 1) scatter-gather, with different parallel backends 2) optimization, with different parallel backends "scatter-gather" demonstrates scatter-gather from mpi4py, and requires numpy and mpi4py. "all_scatter_gather*" demonstrates the pathos versions of scatter-gather, using a Pool and map. "all_scatter-gather*" will leverage pyina (if pyina is available). "optimize" demonstrates using pathos to extend optimization to leverage parallel computing. "optimize*" requires the mystic optimization framework. "optimize0" demonstrates an example optimiztion with mystic, and "optimize" converts "optimize0" to use a map function. 
"optimize_powell" demonstrates the common API for the different parallel backends in pathos (and pyina). "optimize_XXX_YYY_ZZZ" shows different specific combinations of objective function XXX={rosen, cheby}, optimization algorithm YYY={powell, diffev}, and map function ZZZ={map (serial), mpmap (multi-process), ppmap (parallel python), mpimap (MPI)}. Note that "optimize_powell" and "optimize_cheby_powell_mpimap" both require pyina to be installed. uqfoundation-pathos-33e3f91/examples2/all_scatter_gather.py000066400000000000000000000042731467657623600242160ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """example: using the same code with different parallel backends Requires: development version of pathos, pyina http://pypi.python.org/pypi/pathos http://pypi.python.org/pypi/pyina Run with: >$ python all_scatter_gather.py """ import numpy as np from pathos.helpers import freeze_support, shutdown from pathos.pools import ProcessPool from pathos.pools import ParallelPool from pathos.pools import ThreadPool try: from pyina.launchers import Mpi as MpiPool HAS_PYINA = True except ImportError: HAS_PYINA = False nodes = 2; N = 3 # take sin squared of all data def sin2(xi): """sin squared of all data""" import numpy as np return np.sin(xi)**2 if __name__ == '__main__': # ensure properly forks on Windows freeze_support() # print the input to screen x = np.arange(N * nodes, dtype=np.float64) print("Input: %s\n" % x) # run sin2 in series, then print to screen print("Running serial python ...") y = list(map(sin2, x)) print("Output: %s\n" % np.asarray(y)) if HAS_PYINA: # map sin2 to the workers, then print to screen print("Running mpi4py on %d cores..." 
% nodes) y = MpiPool(nodes).map(sin2, x) print("Output: %s\n" % np.asarray(y)) # map sin2 to the workers, then print to screen print("Running multiprocesing on %d processors..." % nodes) y = ProcessPool(nodes).map(sin2, x) print("Output: %s\n" % np.asarray(y)) # map sin2 to the workers, then print to screen print("Running multiprocesing on %d threads..." % nodes) y = ThreadPool(nodes).map(sin2, x) print("Output: %s\n" % np.asarray(y)) # map sin2 to the workers, then print to screen print("Running parallelpython on %d cpus..." % nodes) y = ParallelPool(nodes).map(sin2, x) print("Output: %s\n" % np.asarray(y)) # ensure all pools shutdown shutdown() # EOF uqfoundation-pathos-33e3f91/examples2/all_scatter_gather2.py000066400000000000000000000044711467657623600243000ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """example: using the same code with different parallel backends Requires: development version of pathos, pyina http://pypi.python.org/pypi/pathos http://pypi.python.org/pypi/pyina Run with: >$ python all_scatter_gather2.py """ import numpy as np from pathos.helpers import freeze_support, shutdown from pathos.pools import ProcessPool from pathos.pools import ParallelPool from pathos.pools import ThreadPool try: from pyina.launchers import Mpi as MpiPool HAS_PYINA = True except ImportError: HAS_PYINA = False nodes = 2; N = 3 # the sin of the difference of two numbers def sin_diff(x, xp): """d = sin(x - x')""" from numpy import sin return sin(x - xp) if __name__ == '__main__': # ensure properly forks on Windows freeze_support() # print the input to screen x = np.arange(N * nodes, dtype=np.float64) xp = np.arange(N * nodes, dtype=np.float64)[::-1] print("Input: %s\n" % x) # map sin_diff to the workers, then print to screen print("Running serial python ...") y = list(map(sin_diff, x, xp)) print("Output: %s\n" % np.asarray(y)) if HAS_PYINA: # map sin_diff to the workers, then print to screen print("Running mpi4py on %d cores..." % nodes) y = MpiPool(nodes).map(sin_diff, x, xp) print("Output: %s\n" % np.asarray(y)) # map sin_diff to the workers, then print to screen print("Running multiprocesing on %d processors..." % nodes) y = ProcessPool(nodes).map(sin_diff, x, xp) print("Output: %s\n" % np.asarray(y)) # map sin_diff to the workers, then print to screen print("Running multiprocesing on %d threads..." % nodes) y = ThreadPool(nodes).map(sin_diff, x, xp) print("Output: %s\n" % np.asarray(y)) # map sin_diff to the workers, then print to screen print("Running parallelpython on %d cpus..." 
% nodes) y = ParallelPool(nodes).map(sin_diff, x, xp) print("Output: %s\n" % np.asarray(y)) # ensure all pools shutdown shutdown() # EOF uqfoundation-pathos-33e3f91/examples2/dejong.py000066400000000000000000000014171467657623600216320ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ Rosenbrock's function """ from numpy import sum as numpysum from numpy import asarray def rosen(coeffs): """evaluates n-dimensional Rosenbrock function for a list of coeffs minimum is f(x)=0.0 at xi=1.0""" x = [1]*2 # ensure that there are 2 coefficients x[:len(coeffs)]=coeffs x = asarray(x) #XXX: must be a numpy.array return numpysum(100.0*(x[1:]-x[:-1]**2.0)**2.0 + (1-x[:-1])**2.0)#,axis=0) # End of file uqfoundation-pathos-33e3f91/examples2/optimize.py000077500000000000000000000037201467657623600222260ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ Minimize the selected model with Powell's method. 
Requires: development version of mystic http://pypi.python.org/pypi/mystic """ def optimize(solver, nodes, target='rosen', **kwds): if target == 'rosen': # 3d-rosenbrock # Rosenbrock function from dejong import rosen as the_model ndim = 3 actual_coeffs = [1.0] * ndim pprint = list else: # 4th-order chebyshev # Chebyshev cost function from poly import chebyshev4cost as the_model from poly import chebyshev4coeffs as actual_coeffs ndim = len(actual_coeffs) from mystic.math import poly1d as pprint # number of trials N = nodes print("Number of trials: %s" % N) print("===============") # initial guess import random x0 = ([random.uniform(-100,100) for i in range(ndim)] for i in range(N)) model = (the_model for i in range(N)) # minimize the function results = map(the_solver, model, x0) # find the results with the lowest energy from optimize_helper import best_results solution = best_results(results) print("===============") print("Actual params:\n %s" % pprint(actual_coeffs)) print("Solved params:\n %s" % pprint(solution[0])) print("Function value: %s" % solution[1]) print("Total function evals: %s" % solution[4]) return # Powell's Directonal solver from optimize_helper import fmin_powell as the_solver if __name__ == '__main__': target = 'rosen' #target = 'cheby' print("Function: %s" % target) print("Solver: %s" % 'fmin_powell') optimize(the_solver, nodes=3, target=target) # end of file uqfoundation-pathos-33e3f91/examples2/optimize0.py000077500000000000000000000035411467657623600223070ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ Minimize the selected model with Powell's method. 
Requires: development version of mystic http://pypi.python.org/pypi/mystic """ def optimize(solver, target='rosen', **kwds): if target == 'rosen': # 3d-rosenbrock # Rosenbrock function from dejong import rosen as the_model ndim = 3 actual_coeffs = [1.0] * ndim pprint = list else: # 4th-order chebyshev # Chebyshev cost function from poly import chebyshev4cost as the_model from poly import chebyshev4coeffs as actual_coeffs ndim = len(actual_coeffs) from mystic.math import poly1d as pprint # number of trials print("One trial:") print("===============") # initial guess import random x0 = [random.uniform(-100,100) for i in range(ndim)] # minimize the function results = the_solver(the_model, x0, **kwds) print("===============") print("Actual params:\n %s" % pprint(actual_coeffs)) print("Solved params:\n %s" % pprint(results[0])) print("Function value: %s" % results[1]) print("Total function evals: %s" % results[3]) return # Powell's Directonal solver from optimize_helper import fmin_powell as the_solver if __name__ == '__main__': target = 'rosen' #target = 'cheby' print("Function: %s" % target) print("Solver: %s" % 'fmin_powell') optimize(the_solver, target=target) #optimize(the_solver, target=target, monitor=True) #optimize(the_solver, target=target, monitor=True, disp=False) # end of file uqfoundation-pathos-33e3f91/examples2/optimize_cheby_diffev_map.py000077500000000000000000000042311467657623600255560ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ Solve Nth-order Chebyshev polynomial coefficients with Differential Evolution. Launch optimizers with python's map. 
Requires: development version of mystic, pathos http://pypi.python.org/pypi/mystic http://pypi.python.org/pypi/pathos """ def optimize(solver, mapper, nodes, target='rosen', **kwds): if target == 'rosen': # 3d-rosenbrock ndim = 3 actual_coeffs = [1.0] * ndim pprint = list else: # 4th-order chebyshev from poly import chebyshev4coeffs as actual_coeffs ndim = len(actual_coeffs) from mystic.math import poly1d as pprint # number of trials N = nodes print("Number of trials: %s" % N) print("===============") # initial guess import random x0 = ([random.uniform(-100,100) for i in range(ndim)] for i in range(N)) # minimize the function results = mapper(nodes).map(solver, x0) # find the results with the lowest energy from optimize_helper import best_results solution = best_results(results) print("===============") print("Actual params:\n %s" % pprint(actual_coeffs)) print("Solved params:\n %s" % pprint(solution[0])) print("Function value: %s" % solution[1]) print("Total function evals: %s" % solution[4]) return # build the solver-model pairs def diffev_chebyshev(x0, *args, **kwds): # Differential Evolution solver from optimize_helper import diffev as the_solver # Chebyshev cost function from poly import chebyshev4cost as the_model return the_solver(the_model, x0, monitor=True, *args, **kwds) # get the map functions from pathos.serial import SerialPool as serial if __name__ == '__main__': target = 'cheby' print("Function: %s" % target) print("Solver: %s" % 'diffev') optimize(diffev_chebyshev, serial, nodes=1, target=target) # end of file uqfoundation-pathos-33e3f91/examples2/optimize_cheby_powell_map.py000077500000000000000000000042311467657623600256150ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ Solve Nth-order Chebyshev polynomial coefficients with Powell's method. Launch optimizers with python's map. Requires: development version of mystic, pathos http://pypi.python.org/pypi/mystic http://pypi.python.org/pypi/pathos """ def optimize(solver, mapper, nodes, target='rosen', **kwds): if target == 'rosen': # 3d-rosenbrock ndim = 3 actual_coeffs = [1.0] * ndim pprint = list else: # 4th-order chebyshev from poly import chebyshev4coeffs as actual_coeffs ndim = len(actual_coeffs) from mystic.math import poly1d as pprint # number of trials N = nodes print("Number of trials: %s" % N) print("===============") # initial guess import random x0 = ([random.uniform(-100,100) for i in range(ndim)] for i in range(N)) # minimize the function results = mapper(nodes).map(solver, x0) # find the results with the lowest energy from optimize_helper import best_results solution = best_results(results) print("===============") print("Actual params:\n %s" % pprint(actual_coeffs)) print("Solved params:\n %s" % pprint(solution[0])) print("Function value: %s" % solution[1]) print("Total function evals: %s" % solution[4]) return # build the solver-model pairs def powell_chebyshev(x0, *args, **kwds): # Powell's Directonal solver from optimize_helper import fmin_powell as the_solver # Chebyshev cost function from poly import chebyshev4cost as the_model return the_solver(the_model, x0, monitor=True, *args, **kwds) # get the map functions from pathos.serial import SerialPool as serial if __name__ == '__main__': target = 'cheby' print("Function: %s" % target) print("Solver: %s" % 'fmin_powell') optimize(powell_chebyshev, serial, nodes=1, target=target) # end of file uqfoundation-pathos-33e3f91/examples2/optimize_cheby_powell_mpimap.py000077500000000000000000000042261467657623600263270ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and 
@uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ Solve Nth-order Chebyshev polynomial coefficients with Powell's method. Launch optimizers with mpi4py's map. Requires: development version of mystic, pyina http://pypi.python.org/pypi/mystic http://pypi.python.org/pypi/pyina """ def optimize(solver, mapper, nodes, target='rosen', **kwds): if target == 'rosen': # 3d-rosenbrock ndim = 3 actual_coeffs = [1.0] * ndim pprint = list else: # 4th-order chebyshev from poly import chebyshev4coeffs as actual_coeffs ndim = len(actual_coeffs) from mystic.math import poly1d as pprint # number of trials N = nodes print("Number of trials: %s" % N) print("===============") # initial guess import random x0 = [[random.uniform(-100,100) for i in range(ndim)] for i in range(N)] # minimize the function results = mapper(nodes).map(solver, x0) # find the results with the lowest energy from optimize_helper import best_results solution = best_results(results) print("===============") print("Actual params:\n %s" % pprint(actual_coeffs)) print("Solved params:\n %s" % pprint(solution[0])) print("Function value: %s" % solution[1]) print("Total function evals: %s" % solution[4]) return # build the solver-model pairs def powell_chebyshev(x0, *args, **kwds): # Powell's Directonal solver from optimize_helper import fmin_powell as the_solver # Chebyshev cost function from poly import chebyshev4cost as the_model return the_solver(the_model, x0, monitor=False, *args, **kwds) # get the map functions from pyina.launchers import Mpi as mpipool if __name__ == '__main__': target = 'cheby' print("Function: %s" % target) print("Solver: %s" % 'fmin_powell') optimize(powell_chebyshev, mpipool, nodes=10, target=target) # end of file 
uqfoundation-pathos-33e3f91/examples2/optimize_cheby_powell_mpmap.py000077500000000000000000000044121467657623600261530ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ Solve Nth-order Chebyshev polynomial coefficients with Powell's method. Launch optimizers with multiprocessing's map. Requires: development version of mystic, pathos http://pypi.python.org/pypi/mystic http://pypi.python.org/pypi/pathos """ def optimize(solver, mapper, nodes, target='rosen', **kwds): if target == 'rosen': # 3d-rosenbrock ndim = 3 actual_coeffs = [1.0] * ndim pprint = list else: # 4th-order chebyshev from poly import chebyshev4coeffs as actual_coeffs ndim = len(actual_coeffs) from mystic.math import poly1d as pprint # number of trials N = nodes print("Number of trials: %s" % N) print("===============") # initial guess import random x0 = ([random.uniform(-100,100) for i in range(ndim)] for i in range(N)) # minimize the function results = mapper(nodes).map(solver, x0) # find the results with the lowest energy from optimize_helper import best_results solution = best_results(results) print("===============") print("Actual params:\n %s" % pprint(actual_coeffs)) print("Solved params:\n %s" % pprint(solution[0])) print("Function value: %s" % solution[1]) print("Total function evals: %s" % solution[4]) return # build the solver-model pairs def powell_chebyshev(x0, *args, **kwds): # Powell's Directonal solver from optimize_helper import fmin_powell as the_solver # Chebyshev cost function from poly import chebyshev4cost as the_model return the_solver(the_model, x0, monitor=False, *args, **kwds) # get the map functions from pathos.multiprocessing import ProcessPool as mppool 
if __name__ == '__main__': from pathos.helpers import freeze_support, shutdown freeze_support() target = 'cheby' print("Function: %s" % target) print("Solver: %s" % 'fmin_powell') optimize(powell_chebyshev, mppool, nodes=10, target=target) shutdown() # end of file uqfoundation-pathos-33e3f91/examples2/optimize_cheby_powell_ppmap.py000077500000000000000000000042471467657623600261640ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ Solve Nth-order Chebyshev polynomial coefficients with Powell's method. Launch optimizers with parallelpython's map. Requires: development version of mystic, pathos http://pypi.python.org/pypi/mystic http://pypi.python.org/pypi/pathos """ def optimize(solver, mapper, nodes, target='rosen', **kwds): if target == 'rosen': # 3d-rosenbrock ndim = 3 actual_coeffs = [1.0] * ndim pprint = list else: # 4th-order chebyshev from poly import chebyshev4coeffs as actual_coeffs ndim = len(actual_coeffs) from mystic.math import poly1d as pprint # number of trials N = nodes print("Number of trials: %s" % N) print("===============") # initial guess import random x0 = ([random.uniform(-100,100) for i in range(ndim)] for i in range(N)) # minimize the function results = mapper(nodes).map(solver, x0) # find the results with the lowest energy from optimize_helper import best_results solution = best_results(results) print("===============") print("Actual params:\n %s" % pprint(actual_coeffs)) print("Solved params:\n %s" % pprint(solution[0])) print("Function value: %s" % solution[1]) print("Total function evals: %s" % solution[4]) return # build the solver-model pairs def powell_chebyshev(x0, *args, **kwds): # Powell's Directonal solver from 
optimize_helper import fmin_powell as the_solver # Chebyshev cost function from poly import chebyshev4cost as the_model return the_solver(the_model, x0, monitor=False, *args, **kwds) # get the map functions from pathos.parallel import ParallelPool as pppool if __name__ == '__main__': target = 'cheby' print("Function: %s" % target) print("Solver: %s" % 'fmin_powell') optimize(powell_chebyshev, pppool, nodes=10, target=target) # end of file uqfoundation-pathos-33e3f91/examples2/optimize_helper.py000066400000000000000000000040301467657623600235550ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE def fmin_powell(cost, x0, full=1, disp=1, monitor=0): """ change default behavior for selected optimizers """ from mystic.solvers import fmin_powell as solver from mystic.monitors import Monitor, VerboseMonitor if monitor: mon = VerboseMonitor(10) else: mon = Monitor() npop = 10*len(x0) solved = solver(cost, x0, npop=npop, full_output=full, disp=disp, itermon=mon, handler=0) # return: solution, energy, generations, fevals return solved[0], solved[1], solved[2], solved[3] def diffev(cost, x0, full=1, disp=1, monitor=0): """ change default behavior for selected optimizers """ from mystic.solvers import diffev as solver from mystic.monitors import Monitor, VerboseMonitor if monitor: mon = VerboseMonitor(10) else: mon = Monitor() npop = 10*len(x0) solved = solver(cost, x0, npop=npop, full_output=full, disp=disp, itermon=mon, handler=0) # return: solution, energy, generations, fevals return solved[0], solved[1], solved[2], solved[3] def best_results(results): """ get the results with the lowest energy """ results = list(results) # in case we used an iterator best = 
list(results[0][0]), results[0][1] bestpath = results[0][2] besteval = results[0][3] func_evals = besteval for result in results[1:]: func_evals += result[3] # add function evaluations if result[1] < best[1]: # compare energy best = list(result[0]), result[1] bestpath = result[2] besteval = result[3] # return best: solution, energy, generations, fevals return best[0], best[1], bestpath, besteval, func_evals # EOF uqfoundation-pathos-33e3f91/examples2/optimize_powell.py000077500000000000000000000050551467657623600236130ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ Minimize the selected model with Powell's method. Requires: development version of mystic, pathos, pyina http://pypi.python.org/pypi/mystic http://pypi.python.org/pypi/pathos http://pypi.python.org/pypi/pyina """ def optimize(solver, mapper, nodes, target='rosen', **kwds): if target == 'rosen': # 3d-rosenbrock # Rosenbrock function from dejong import rosen as the_model ndim = 3 actual_coeffs = [1.0] * ndim pprint = list else: # 4th-order chebyshev # Chebyshev cost function from poly import chebyshev4cost as the_model from poly import chebyshev4coeffs as actual_coeffs ndim = len(actual_coeffs) from mystic.math import poly1d as pprint # number of trials N = nodes print("Number of trials: %s" % N) print("===============") # initial guess import random x0 = ([random.uniform(-100,100) for i in range(ndim)] for i in range(N)) model = (the_model for i in range(N)) # minimize the function results = mapper(nodes).map(the_solver, model, x0) # find the results with the lowest energy from optimize_helper import best_results solution = best_results(results) print("===============") print("Actual 
params:\n %s" % pprint(actual_coeffs)) print("Solved params:\n %s" % pprint(solution[0])) print("Function value: %s" % solution[1]) print("Total function evals: %s" % solution[4]) return # Powell's Directonal solver from optimize_helper import fmin_powell as the_solver # get the map functions from pathos.serial import SerialPool as serial from pathos.parallel import ParallelPool as pppool from pathos.multiprocessing import ProcessPool as mppool from pyina.launchers import Mpi as mpipool if __name__ == '__main__': target = 'rosen' #target = 'cheby' print("Function: %s" % target) print("Solver: %s" % 'fmin_powell') #NOTE: some of the below should fail, due to how objects are shipped in map optimize(the_solver, serial, nodes=2, target=target) #optimize(the_solver, mppool, nodes=2, target=target) #optimize(the_solver, pppool, nodes=2, target=target) #optimize(the_solver, mpipool, nodes=2, target=target) #XXX: Fails # end of file uqfoundation-pathos-33e3f91/examples2/optimize_rosen_powell_map.py000077500000000000000000000041671467657623600256610ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ Minimize the Rosenbrock function with Powell's method. Launch optimizers with python's map. 
Requires: development version of mystic, pathos http://pypi.python.org/pypi/mystic http://pypi.python.org/pypi/pathos """ def optimize(solver, mapper, nodes, target='rosen', **kwds): if target == 'rosen': # 3d-rosenbrock ndim = 3 actual_coeffs = [1.0] * ndim pprint = list else: # 4th-order chebyshev from poly import chebyshev4coeffs as actual_coeffs ndim = len(actual_coeffs) from mystic.math import poly1d as pprint # number of trials N = nodes print("Number of trials: %s" % N) print("===============") # initial guess import random x0 = ([random.uniform(-100,100) for i in range(ndim)] for i in range(N)) # minimize the function results = mapper(nodes).map(solver, x0) # find the results with the lowest energy from optimize_helper import best_results solution = best_results(results) print("===============") print("Actual params:\n %s" % pprint(actual_coeffs)) print("Solved params:\n %s" % pprint(solution[0])) print("Function value: %s" % solution[1]) print("Total function evals: %s" % solution[4]) return # build the solver-model pairs def powell_rosen(x0, *args, **kwds): # Powell's Directonal solver from optimize_helper import fmin_powell as the_solver # Rosenbrock function from dejong import rosen as the_model return the_solver(the_model, x0, monitor=False, *args, **kwds) # get the map functions from pathos.serial import SerialPool as serial if __name__ == '__main__': target = 'rosen' print("Function: %s" % target) print("Solver: %s" % 'fmin_powell') optimize(powell_rosen, serial, nodes=10, target=target) # end of file uqfoundation-pathos-33e3f91/examples2/poly.py000066400000000000000000000034231467657623600213460ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ Chebyshev's polynomials """ from numpy import sum as numpysum from numpy import asarray from mystic.math import polyeval, poly1d # coefficients for specific Chebyshev polynomials chebyshev2coeffs = [2., 0., -1.] chebyshev4coeffs = [8., 0., -8., 0., 1.] chebyshev6coeffs = [32., 0., -48., 0., 18., 0., -1.] chebyshev8coeffs = [128., 0., -256., 0., 160., 0., -32., 0., 1.] chebyshev16coeffs = [32768., 0., -131072., 0., 212992., 0., -180224., 0., 84480., 0., -21504., 0., 2688., 0., -128., 0., 1] def chebyshevcostfactory(target): def chebyshevcost(trial,M=61): """The costfunction for order-n Chebyshev fitting. M evaluation points between [-1, 1], and two end points""" result=0.0 x=-1.0 dx = 2.0 / (M-1) for i in range(M): px = polyeval(trial, x) if px<-1 or px>1: result += (1 - px) * (1 - px) x += dx px = polyeval(trial, 1.2) - polyeval(target, 1.2) if px<0: result += px*px px = polyeval(trial, -1.2) - polyeval(target, -1.2) if px<0: result += px*px return result return chebyshevcost # prepared cost factories chebyshev2cost = chebyshevcostfactory(chebyshev2coeffs) chebyshev4cost = chebyshevcostfactory(chebyshev4coeffs) chebyshev6cost = chebyshevcostfactory(chebyshev6coeffs) chebyshev8cost = chebyshevcostfactory(chebyshev8coeffs) chebyshev16cost = chebyshevcostfactory(chebyshev16coeffs) # End of file uqfoundation-pathos-33e3f91/examples2/scatter_gather.py000077500000000000000000000026331467657623600233670ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """example: parallelism in python with mp4py Run with: > $ mpirun -np 2 scatter_gather.py """ import numpy as np from mpi4py import MPI comm = MPI.COMM_WORLD nodes = comm.size #2 my_N = 3 N = my_N * nodes # a print function that prints only to rank 0 def pprint(str="", end="\n", comm=comm): """Print for MPI parallel programs: Only rank 0 prints *str*.""" if comm.rank == 0: print(str, end=end) # set up the target arrays if comm.rank == 0: x = np.arange(N, dtype=np.float64) else: x = np.empty(N, dtype=np.float64) my_x = np.empty(my_N, dtype=np.float64) # scatter data into arrays on each node comm.Scatter( [x, MPI.DOUBLE], [my_x, MPI.DOUBLE] ) # print the input to screen pprint("Input:") for r in range(nodes): if comm.rank == r: print(" [node %d] %s" % (comm.rank, my_x)) comm.Barrier() # take the sin squared of all data pprint("Running on %d cores..." % nodes) my_x = np.sin(my_x)**2 # gather data into the head node comm.Gather( [my_x, MPI.DOUBLE], [x, MPI.DOUBLE] ) # print the ouput to screen pprint("Output:\n %s" % x) # EOF uqfoundation-pathos-33e3f91/pathos/000077500000000000000000000000001467657623600174055ustar00rootroot00000000000000uqfoundation-pathos-33e3f91/pathos/__init__.py000066400000000000000000000046641467657623600215300ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Author: June Kim (jkim @caltech) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE # author, version, license, and long description try: # the package is installed from .__info__ import __version__, __author__, __doc__, __license__ except: # pragma: no cover import os import sys parent = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) sys.path.append(parent) # get distribution meta info from version import (__version__, __author__, get_license_text, get_readme_as_rst) __license__ = get_license_text(os.path.join(parent, 'LICENSE')) __license__ = "\n%s" % __license__ __doc__ = get_readme_as_rst(os.path.join(parent, 'README.md')) del os, sys, parent, get_license_text, get_readme_as_rst # logger def logger(level=None, handler=None, **kwds): """generate a logger instance for pathos Args: level (int, default=None): the logging level. handler (object, default=None): a ``logging`` handler instance. name (str, default='pathos'): name of the logger instance. Returns: configured logger instance. """ import logging name = kwds.get('name', 'pathos') log = logging.getLogger(name) if handler is not None: log.handlers = [] log.addHandler(handler) elif not len(log.handlers): log.addHandler(logging.StreamHandler()) if level is not None: log.setLevel(level) return log # high-level interface from . import core from . import hosts from . import server from . import selector from . import connection from . import pools from . import maps # worker pools from . import serial from . import parallel from . import multiprocessing from . import threading # tools, utilities, etc from . import util from . 
import helpers # backward compatibility python = serial pp = parallel from pathos.secure import Pipe as SSH_Launcher from pathos.secure import Copier as SCP_Launcher from pathos.secure import Tunnel as SSH_Tunnel def license(): """print license""" print(__license__) return def citation(): """print citation""" print(__doc__[-491:-118]) return # end of file uqfoundation-pathos-33e3f91/pathos/__main__.py000066400000000000000000000135121467657623600215010ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ connect to the specified machine and start a 'server', 'tunnel', or both Notes: Usage: pathos_connect [hostname] [server] [remoteport] [profile] [hostname] - name of the host to connect to [server] - name of RPC server (assumes is installed on host) or 'tunnel' [remoteport] - remote port to use for communication or 'tunnel' [profile] -- name of shell profile to source on remote environment Examples:: $ pathos_connect computer.college.edu ppserver tunnel Usage: pathos_connect [hostname] [server] [remoteport] [profile] [hostname] - name of the host to connect to [server] - name of RPC server (assumes is installed on host) or 'tunnel' [remoteport] - remote port to use for communication or 'tunnel' [profile] -- name of shell profile to source on remote environment defaults are: "localhost" "tunnel" "" "" executing {ssh -N -L 22921:computer.college.edu:15058}' Server running at port=15058 with pid=4110 Connected to localhost at port=22921 Press to kill server """ ## tunnel: pathos_connect college.edu tunnel ## server: pathos_connect college.edu ppserver 12345 .profile ## both: pathos_connect college.edu ppserver tunnel .profile from pathos.core import * 
from pathos.hosts import get_profile, register_profiles if __name__ == '__main__': ##### CONFIGURATION & INPUT ######################## # set the default remote host rhost = 'localhost' #rhost = 'foobar.internet.org' #rhost = 'computer.college.edu' # set any 'special' profiles (those which don't use default_profie) profiles = {} #profiles = {'foobar.internet.org':'.profile', # 'computer.college.edu':'.cshrc'} # set the default port rport = '' _rport = '98909' # set the default server command server = 'tunnel' #server = 'ppserver' #XXX: "ppserver -p %s" % rport #server = 'classic_server' #XXX: "classic_server -p %s" % rport #server = 'registry_server' #XXX: "registry_server -p %s" % rport print("""Usage: pathos_connect [hostname] [remoteport] [server] [profile] Usage: pathos_connect [hostname] [server] [remoteport] [profile] [hostname] - name of the host to connect to [server] - name of RPC server (assumes is installed on host) or 'tunnel' [remoteport] - remote port to use for communication or 'tunnel' [profile] -- name of shell profile to source on remote environment defaults are: "%s" "%s" "%s" "%s".""" % (rhost, server, rport, '')) # get remote hostname from user import sys if '--help' in sys.argv: sys.exit(0) try: myinp = sys.argv[1] except: myinp = None if myinp: rhost = myinp #XXX: should test rhost validity here... (how ?) else: pass # use default del myinp # get server to run from user try: myinp = sys.argv[2] except: myinp = None if myinp: server = myinp #XXX: should test validity here... (filename) else: pass # use default del myinp # set the default 'port' if server == 'tunnel': tunnel = True server = None else: tunnel = False rport = rport if tunnel else _rport # get remote port to run server on from user try: myinp = sys.argv[3] except: myinp = None if myinp: if tunnel: # tunnel doesn't take more inputs msg = "port '%s' not valid for 'tunnel'" % myinp raise ValueError(msg) rport = myinp #XXX: should test validity here... 
(filename) else: pass # use default del myinp # is it a tunneled server? tunnel = True if (tunnel or rport == 'tunnel') else False rport = '' if rport == 'tunnel' else rport # get remote profile (this should go away soon) try: myinp = sys.argv[4] except: myinp = None if myinp: rprof = myinp #XXX: should test validity here... (filename) profiles = {rhost:rprof} else: pass # use default del myinp # my remote environment (should be auto-detected) register_profiles(profiles) profile = get_profile(rhost) ##### CONFIGURATION & INPUT ######################## ## tunnel: pathos_connect foo.college.edu tunnel ## server: pathos_connect foo.college.edu ppserver 12345 .profile ## both: pathos_connect foo.college.edu ppserver tunnel .profile if tunnel: # establish ssh tunnel tunnel = connect(rhost) lport = tunnel._lport rport = tunnel._rport print('executing {ssh -N -L %d:%s:%d}' % (lport, rhost, rport)) else: lport = '' if server: # run server rserver = serve(server, rhost, rport, profile=profile) response = rserver.response() if response: if tunnel: tunnel.disconnect() print(response) raise OSError('Failure to start server') # get server pid #FIXME: launcher.pid is not pid(server) target = '[P,p]ython[^#]*'+server # filter w/ regex for python-based server try: pid = getpid(target, rhost) except OSError: print("Cleanup on host may be required...") if tunnel: tunnel.disconnect() raise # test server # XXX: add a simple one-liner... print("\nServer running at port=%s with pid=%s" % (rport, pid)) if tunnel: print("Connected to localhost at port=%s" % (lport)) print('Press to kill server') else: print('Press to disconnect') sys.stdin.readline() if server: # stop server print(kill(pid,rhost)) # del rserver #XXX: delete should run self.kill (?) 
if tunnel: # disconnect tunnel tunnel.disconnect() # FIXME: just kills 'ssh', not the tunnel # get local pid: ps u | grep "ssh -N -L%s:%s$s" % (lport,rhost,rport) # kill -15 int(tunnelpid) # EOF uqfoundation-pathos-33e3f91/pathos/_ppserver_config.py000066400000000000000000000010221467657623600233040ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """defalut ppserver host and port configuration""" #tunnelports = ['12345','67890'] tunnelports = [] ppservers = tuple(["localhost:%s" % port for port in tunnelports]) # End of file uqfoundation-pathos-33e3f91/pathos/abstract_launcher.py000066400000000000000000000246421467657623600234530ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ This module contains the base classes for pathos pool and pipe objects, and describes the map and pipe interfaces. A pipe is defined as a connection between two 'nodes', where a node is something that does work. A pipe may be a one-way or two-way connection. A map is defined as a one-to-many connection between nodes. In both map and pipe connections, results from the connected nodes can be returned to the calling node. There are several variants of pipe and map, such as whether the connection is blocking, or ordered, or asynchronous. For pipes, derived methods must overwrite the 'pipe' method, while maps must overwrite the 'map' method. 
Pipes and maps are available from worker pool objects, where the work is done by any of the workers in the pool. For more specific point-to-point connections (such as a pipe between two specific compute nodes), use the pipe object directly. Usage ===== A typical call to a pathos map will roughly follow this example: >>> # instantiate and configure the worker pool >>> from pathos.pools import ProcessPool >>> pool = ProcessPool(nodes=4) >>> >>> # do a blocking map on the chosen function >>> results = pool.map(pow, [1,2,3,4], [5,6,7,8]) >>> >>> # do a non-blocking map, then extract the results from the iterator >>> results = pool.imap(pow, [1,2,3,4], [5,6,7,8]) >>> print("...") >>> results = list(results) >>> >>> # do an asynchronous map, then get the results >>> results = pool.amap(pow, [1,2,3,4], [5,6,7,8]) >>> while not results.ready(): ... time.sleep(5); print(".", end=' ') ... >>> results = results.get() Notes ===== Each of the pathos worker pools rely on a different transport protocol (e.g. threads, multiprocessing, etc), where the use of each pool comes with a few caveats. See the usage documentation and examples for each worker pool for more information. """ __all__ = ['AbstractPipeConnection', 'AbstractWorkerPool'] class AbstractPipeConnection(object): """ AbstractPipeConnection base class for pathos pipes. """ def __init__(self, *args, **kwds): """ Required input: ??? Additional inputs: ??? Important class members: ??? Other class members: ??? """ object.__init__(self)#, *args, **kwds) return def __repr__(self): return "" % self.__class__.__name__ # interface pass class AbstractWorkerPool(object): # base for worker pool strategy or all maps? """ AbstractWorkerPool base class for pathos pools. 
""" __nodes = 1 def __init__(self, *args, **kwds): """ Important class members: nodes - number (and potentially description) of workers ncpus - number of worker processors servers - list of worker servers scheduler - the associated scheduler workdir - associated $WORKDIR for scratch calculations/files Other class members: scatter - True, if uses 'scatter-gather' (instead of 'worker-pool') source - False, if minimal use of TemporaryFiles is desired timeout - number of seconds to wait for return value from scheduler """ object.__init__(self)#, *args, **kwds) self.__init(*args, **kwds) self._id = kwds.get('id', None) return def __enter__(self): return self def __exit__(self, *args): #self.clear() return def __init(self, *args, **kwds): """default filter for __init__ inputs """ # allow default arg for 'nodes', but not if in kwds if len(args): try: nodes = kwds['nodes'] msg = "got multiple values for keyword argument 'nodes'" raise TypeError(msg) except KeyError: nodes = args[0] else: nodes = kwds.get('nodes', self.__nodes) try: self.nodes = nodes except TypeError: pass # then self.nodes is read-only return def __map(self, f, *args, **kwds): """default filter for map inputs """ # barf if given keywords if kwds: pass # raise TypeError("map() takes no keyword arguments") #raise TypeError("'%s' is an invalid keyword for this function" % kwds.keys()[0]) # at least one argument is required try: argz = [args[0]] except IndexError: raise TypeError("map() requires at least two args") return def __imap(self, f, *args, **kwds): """default filter for imap inputs """ # barf if given keywords if kwds: pass # raise TypeError("map() does not take keyword arguments") #raise TypeError("'%s' is an invalid keyword for this function" % kwds.keys()[0]) # at least one argument is required try: argz = [args[0]] except IndexError: raise TypeError("imap() must have at least two arguments") return def __pipe(self, f, *args, **kwds): #FIXME: need to think about this... 
"""default filter for pipe inputs """ # barf if given keywords if kwds: pass # raise TypeError("pipe() does not take keyword arguments") #raise TypeError("'%s' is an invalid keyword for this function" % kwds.keys()[0]) # a valid number of arguments are required try: vars = f.__code__.co_argcount defs = len(f.__defaults__) arglen = len(args) minlen = vars - defs if vars == minlen and arglen != vars: #XXX: argument vs arguments raise TypeError("%s() takes at exactly %s arguments (%s given)" % (f.__name__(), str(vars), str(arglen))) elif arglen > vars: raise TypeError("%s() takes at most %s arguments (%s given)" % (f.__name__(), str(vars), str(arglen))) elif arglen < (vars - defs): raise TypeError("%s() takes at least %s arguments (%s given)" % (f.__name__(), str(vars - defs), str(arglen))) except: pass return def _serve(self, *args, **kwds): """Create a new server if one isn't already initialized""" raise NotImplementedError #_pool = None #return _pool def clear(self): """Remove server with matching state""" raise NotImplementedError #return #XXX: return _pool? (i.e. pop) def map(self, f, *args, **kwds): """run a batch of jobs with a blocking and ordered map Returns a list of results of applying the function f to the items of the argument sequence(s). If more than one sequence is given, the function is called with an argument list consisting of the corresponding item of each sequence. Some maps accept the `chunksize` keyword, which causes the sequence to be split into tasks of approximately the given size. """ #self.__map(f, *args, **kwds) raise NotImplementedError def imap(self, f, *args, **kwds): """run a batch of jobs with a non-blocking and ordered map Returns a list iterator of results of applying the function f to the items of the argument sequence(s). If more than one sequence is given, the function is called with an argument list consisting of the corresponding item of each sequence. 
Some maps accept the `chunksize` keyword, which causes the sequence to be split into tasks of approximately the given size. """ #self.__imap(f, *args, **kwds) raise NotImplementedError def uimap(self, f, *args, **kwds): """run a batch of jobs with a non-blocking and unordered map Returns a list iterator of results of applying the function f to the items of the argument sequence(s). If more than one sequence is given, the function is called with an argument list consisting of the corresponding item of each sequence. The order of the resulting sequence is not guaranteed. Some maps accept the `chunksize` keyword, which causes the sequence to be split into tasks of approximately the given size. """ #self.__imap(f, *args, **kwds) raise NotImplementedError def amap(self, f, *args, **kwds): """run a batch of jobs with an asynchronous map Returns a results object which containts the results of applying the function f to the items of the argument sequence(s). If more than one sequence is given, the function is called with an argument list consisting of the corresponding item of each sequence. To retrieve the results, call the get() method on the returned results object. The call to get() is blocking, until all results are retrieved. Use the ready() method on the result object to check if all results are ready. Some maps accept the `chunksize` keyword, which causes the sequence to be split into tasks of approximately the given size. """ #self.__map(f, *args, **kwds) raise NotImplementedError ######################################################################## # PIPES def pipe(self, f, *args, **kwds): """submit a job and block until results are available Returns result of calling the function f on a selected worker. This function will block until results are available. """ #self.__pipe(f, *args, **kwds) raise NotImplementedError def apipe(self, f, *args, **kwds): # register a callback ? 
"""submit a job asynchronously to a queue Returns a results object which containts the result of calling the function f on a selected worker. To retrieve the results, call the get() method on the returned results object. The call to get() is blocking, until the result is available. Use the ready() method on the results object to check if the result is ready. """ #self.__pipe(f, *args, **kwds) raise NotImplementedError ######################################################################## def __repr__(self): return "" % self.__class__.__name__ def __get_nodes(self): """get the number of nodes in the pool""" return self.__nodes def __set_nodes(self, nodes): """set the number of nodes in the pool""" raise TypeError("nodes is a read-only attribute") # interface pass uqfoundation-pathos-33e3f91/pathos/connection.py000066400000000000000000000151531467657623600221230ustar00rootroot00000000000000#!/usr/bin/env python # # Originally from pythia-0.8 pyre.mpi.Launcher.py (svn:danse.us/pyre -r2) # Forked by: Mike McKerns (January 2004) # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2004-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ This module contains the base class for popen pipes, and describes the popen pipe interface. The 'config' method can be overwritten for pipe customization. The pipe's 'launch' method can be overwritten with a derived pipe's new execution algorithm. See the following for an example of standard use. 
Usage ===== A typical call to a popen 'pipe' will roughly follow this example: >>> # instantiate the pipe >>> pipe = Pipe() >>> >>> # configure the pipe to stage the command >>> pipe(command='hostname') >>> >>> # execute the launch and retrieve the response >>> pipe.launch() >>> print(pipe.response()) """ __all__ = ['Pipe', 'PipeException'] import os import sys import signal import random import string from pathos.selector import Selector from pathos.util import _str class PipeException(Exception): '''Exception for failure to launch a command''' pass # broke backward compatability: 30/05/14 ==> replace base-class almost entirely class Pipe(object): """a popen-based pipe for parallel and distributed computing""" verbose = True from pathos import logger _debug = logger(level=30) # logging.WARN del logger def __init__(self, name=None, **kwds): """create a popen-pipe Inputs: name: a unique identifier (string) for the pipe command: a command to send [default = 'echo '] background: run in background [default = False] decode: ensure response is 'ascii' [default = True] stdin: file-like object to serve as standard input for the remote process """ xyz = string.ascii_letters self.name = ''.join(random.choice(xyz) for i in range(16)) \ if name is None else name self.background = kwds.pop('background', False) self.stdin = kwds.pop('stdin', sys.stdin) self.codec = kwds.pop('decode', 'ascii') self.message = kwds.pop('command', 'echo %s' % self.name) #' '? self._response = None self._pid = 0 self.config(**kwds) return def __repr__(self): return "Pipe('%s')" % self.message def config(self, **kwds): '''configure the pipe using given keywords (Re)configure the pipe for the following inputs: command: a command to send [default = 'echo '] background: run in background [default = False] decode: ensure response is 'ascii' [default = True] stdin: file-like object to serve as standard input for the remote process ''' if self.message is None: self.message = 'echo %s' % self.name #' '? 
if self.stdin is None: self.stdin = sys.stdin if self.codec is None: self.codec = 'ascii' for key, value in kwds.items(): if key == 'command': self.message = value elif key == 'background': self.background = value elif key == 'decode': self.codec = value elif key == 'stdin': self.stdin = value self._stdout = None names=['message','background','stdin','codec'] return dict((i,getattr(self, i)) for i in names) def launch(self): '''launch a configured command''' self._response = None self._execute() # preempt with pox.which(message.split()[0]) ? return def _execute(self): #'''execute by piping the command, & saving the file object''' from subprocess import Popen, PIPE, STDOUT #XXX: what if saved list/dict of _stdout instead of just the one? # could associated name/_pid and _stdout if self.background: #Spawn a background process try: p = Popen(self.message, shell=True, stdin=self.stdin, stdout=PIPE, stderr=STDOUT, close_fds=True) except: raise PipeException('failure to pipe: %s' % self.message) self._pid = p.pid #get fileobject pid self._stdout = p.stdout #save fileobject else: try: p = Popen(self.message, shell=True, stdin=self.stdin, stdout=PIPE) except: raise PipeException('failure to pipe: %s' % self.message) self._stdout = p.stdout self._pid = 0 #XXX: MMM --> or -1 ? return def response(self): '''Return the response from the launched process. Return None if no response was received yet from a background process. ''' #XXX: return bytes, decode to ascii, take encoding, or ??? 
if self._stdout is None: raise PipeException("'launch' is required after any reconfiguration") if self.codec is True: codec = 'ascii' elif self.codec is False: codec = False elif self.codec is None: codec = False else: codec = self.codec if self._response is not None: return _str(self._response, codec) # when running in foreground _pid is 0 (may change to -1) if self._pid <= 0: self._response = self._stdout.read() return _str(self._response, codec) # handle response from a background process def onData(selector, fobj): if self.verbose: print("handling pipe response") self._debug.info('on_remote') self._response = fobj.read() selector.state = False return def onTimeout(selector): selector.state = False sel = Selector() #sel._info.activate() sel.notifyOnReadReady(self._stdout, onData) sel.notifyWhenIdle(onTimeout) sel.watch(2.0) # reset _response to None to allow capture of a next response # from a background process return _str(self._response, codec) def pid(self): '''get pipe pid''' return self._pid def kill(self): '''terminate the pipe''' if self._pid > 0: if self.verbose: print('Kill pid=%d' % self._pid) os.kill(self._pid, signal.SIGTERM) os.waitpid(self._pid, 0) self._pid = 0 return # interface __call__ = config pass # End of file uqfoundation-pathos-33e3f91/pathos/core.py000066400000000000000000000256551467657623600207240ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ high-level programming interface to core pathos utilities """ __all__ = ['copy', 'execute', 'kill', 'getpid', 'getppid', 'getchild', \ 'serve', 'connect', 'randomport'] import os import string import re import pathos # standard pattern for 'ps axj': '... 
ddddd ddddd ddddd ...' _psaxj = re.compile(r"((\S+\s+)?\d+\s+\d+\s+\d+\s)") def copy(source, destination=None, **kwds): '''copy source to (possibly) remote destination Execute a copy, and return the copier. Use 'kill' to kill the copier, and 'pid' to get the process id for the copier. Args: source -- path string of source 'file' destination -- path string for destination target ''' #XXX: options, background, stdin can be set w/ kwds (also name, launcher) if destination is None: destination = os.getcwd() from pathos.secure import Copier opt = kwds.pop('options', None) kwds['background'] = kwds.pop('bg', False) # ignores 'background' copier = Copier(**kwds) if ':' in source or ':' in destination: if opt is None: opt = '-q -r' copier(options=opt, source=source, destination=destination) else: if opt is None: opt = '-r' copier(launcher='cp', options=opt, source=source, destination=destination) pathos.logger().info('executing {%s}', copier.message) copier.launch() copier.kill() return copier def execute(command, host=None, bg=True, **kwds): '''execute a command (possibly) on a remote host Execute a process, and return the launcher. Use 'response' to retrieve the response from the executed command. Use 'kill' to kill the launcher, and 'pid' to get the process id for the launcher. Args: command -- command string to be executed host -- hostname of execution target [default = None (i.e. run locally)] bg -- run as background process? 
[default = True] ''' #XXX: options, background, stdin can be set w/ kwds (also name, launcher) bg = bool(bg) # overrides 'background' if host in [None, '']: from pathos.connection import Pipe launcher = Pipe(**kwds) launcher(command=command, background=bg) else: from pathos.secure import Pipe opt = kwds.pop('options', '-q') launcher = Pipe(**kwds) launcher(options=opt, command=command, host=host, background=bg) pathos.logger().info('executing {%s}', launcher.message) launcher.launch() #response = launcher.response() #launcher.kill() #return response return launcher #XXX: add local-only versions of kill and *pid to pox? #XXX: use threading.Timer (or sched) to schedule or kill after N seconds? def kill(pid, host=None, **kwds): '''kill a process (possibly) on a remote host Args: pid -- process id host -- hostname where process is running [default = None (i.e. locally)] ''' #XXX: launcher has "kill self" method; use it? note that this is different? command = 'kill -n TERM %s' % pid #XXX: use TERM=15 or KILL=9 ? getpid(pid, host) # throw error if pid doesn't exist #XXX: bad idea? response = execute(command, host, bg=False, **kwds).response() return response #XXX: how handle failed response? bg=True prints, bg=False returns stderr def _psax(response, pattern=None): """strips out bad lines in 'ps ax' response Takes multi-line string, response from execute('ps ax') or execute('ps axj'). Takes an optional regex pattern for finding 'good' lines. If pattern is None, assumes 'ps ax' was called. """ if not response: return response if pattern: response = (line for line in response.split('\n') if pattern.match(line)) else: # a 'ps ax' line should start with a 'digit'; " PID THING ..." 
response = (line for line in response.split('\n') \ if line and line.lstrip()[0] in string.digits) return '\n'.join(response) def getpid(target=None, host=None, all=False, **kwds): '''get the process id for a target process (possibly) running on remote host This method should only be used as a last-ditch effort to find a process id. This method __may__ work when a child has been spawned and the pid was not registered... but there's no guarantee. If target is None, then get the process id of the __main__ python instance. Args: target -- string name of target process host -- hostname where process is running all -- get all resulting lines from query? [default = False] ''' from numbers import Integral if target is None: if all: target = '' elif host: raise OSError('[Error 3] No such process') else: return os.getpid() elif isinstance(target, Integral): #NOTE: passing pid useful for all=True target = "%5d " % target #NOTE: assumes max pid is 99999 #command = "ps -A | grep '%s'" % target # 'other users' only command = "ps ax | grep '%s'" % target # 'all users' response = _psax(execute(command, host, bg=False, **kwds).response()) ignore = "grep %s" % target if all: return response try: # select the PID # find most recent where "grep '%s'" not in line pid = sorted(_select(line,(0,))[0] \ for line in response.split('\n') if line and ignore not in line \ and command not in line) if pid is None: raise OSError('Failure to recover process id') #XXX: take advantage of *ppid to help match correct pid? return int(pid[-1]) except (AttributeError, IndexError): raise OSError('[Error 3] No such process') def _select(line, indx): '''select the correct data from the string, using the given index Takes a single string line, and a tuple of positional indicies. 
''' line = line.split() if max(indx) > len(line) - 1: return (None,None) # for the off chance there's a bad line return tuple(line[i] for i in indx) def getppid(pid=None, host=None, group=False): # find parent of pid '''get parent process id (ppid) for the given process If pid is None, the pid of the __main__ python instance will be used. Args: pid -- process id host -- hostname where process is running group -- get parent group id (pgid) instead of direct parent id? ''' if pid is None: if host: raise OSError('[Error 3] No such process') return os.getpgrp() if group else os.getppid() pid = str(pid) command = "ps axj" response = execute(command, host).response() if response is None: raise OSError('[Errno 3] No such process') # analyze header for correct pattern and indx head = (line for line in response.split('\n') if 'PPID' in line) try: head = next(head).split() except StopIteration: raise OSError('Failure to recover process id') parent = 'PGID' if group else 'PPID' indx = (head.index('PID'), head.index(parent)) # extract good data lines from response response = _psax(response, pattern=_psaxj) # select the PID and parent id response = dict(_select(line,indx) for line in response.split('\n') if line) response = response.get(pid, None) if response is None: raise OSError('[Errno 3] No such process') return int(response) def getchild(pid=None, host=None, group=False): # find all children of pid '''get all child process ids for the given parent process id (ppid) If pid is None, the pid of the __main__ python instance will be used. Args: pid -- parent process id host -- hostname where process is running group -- get process ids for the parent group id (pgid) instead? 
''' if pid is None: if host: raise OSError('[Error 3] No such process') pid = getpid() pid = str(pid) command = "ps axj" response = execute(command, host).response() if response is None: raise OSError('[Errno 3] No such process') # analyze header for correct pattern and indx head = (line for line in response.split('\n') if 'PPID' in line) try: head = next(head).split() except StopIteration: raise OSError('Failure to recover process id') parent = 'PGID' if group else 'PPID' indx = (head.index('PID'), head.index(parent)) # extract good data lines from response response = _psax(response, pattern=_psaxj) # select the PID and parent id response = dict(_select(line,indx) for line in response.split('\n') if line) children = [int(key) for (key,value) in response.items() if value == pid] if children: return children if not group: # check to see if given 'PID' actually exists exists = [int(key) for (key,value) in response.items() if key == pid] else: exists = False # if 'PGID' not found, then doesn't exist if exists: return children raise OSError('[Errno 3] No such process') def randomport(host=None): '''select a open port on a (possibly) remote host Args: host -- hostname on which to select a open port ''' from pathos.portpicker import randomport if not host: return randomport() from pathos.secure import Pipe from pathos.portpicker import __file__ as src # make sure src is a .py file, not .pyc or .pyo src = src.rstrip('co') launcher = Pipe() #XXX: use pox.which / which_python? 
launcher(command='python', host=host, background=False, stdin=open(src)) pathos.logger().info('executing {python <%s} on %s', src, host) launcher.launch() try: rport = int(launcher.response()) except: from pathos.secure import TunnelException raise TunnelException("failure to pick remote port") # return remote port number return rport def connect(host, port=None, through=None): '''establish a secure tunnel connection to a remote host at the given port Args: host -- hostname to which a tunnel should be established port -- port number (on host) to connect the tunnel to through -- 'tunnel-through' hostname [default = None] ''' from pathos.secure import Tunnel t = Tunnel() t.connect(host, port, through) return t #FIXME: needs work... def serve(server, host=None, port=None, profile='.bash_profile'): '''begin serving RPC requests Args: server: name of RPC server (i.e. 'ppserver') host: hostname on which a server should be launched port: port number (on host) that server will accept request at profile: file to configure the user's environment [default='.bash_profile'] ''' if host is None: #XXX: and...? profile = '' else: profile = 'source %s; ' % profile file = '~/bin/%s.py' % server #XXX: _should_ be on the $PATH if port is None: port = randomport(host) command = "%s -p %s" % (file,port) rserver = execute(command, host, bg=True) response = rserver.response() pathos.logger().info('response = %r', response) if response in ['', None]: #XXX: other responses allowed (?) pass else: #XXX: not really error checking... 
pathos.logger().error('invalid response = %r', response) from time import sleep delay = 2.0 sleep(delay) return rserver if __name__ == '__main__': pass uqfoundation-pathos-33e3f91/pathos/helpers/000077500000000000000000000000001467657623600210475ustar00rootroot00000000000000uqfoundation-pathos-33e3f91/pathos/helpers/__init__.py000066400000000000000000000017501467657623600231630ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE from . import pp_helper from . import mp_helper import ppft as parallelpython try: import multiprocess as mp from multiprocess.pool import Pool as ProcessPool from multiprocess import cpu_count from multiprocess.dummy import Pool as ThreadPool from multiprocess import freeze_support except ImportError: # fall-back to package distributed with python import multiprocessing as mp from multiprocessing.pool import Pool as ProcessPool from multiprocessing import cpu_count from multiprocessing.dummy import Pool as ThreadPool from multiprocessing import freeze_support from pathos.pools import _clear as shutdown uqfoundation-pathos-33e3f91/pathos/helpers/mp_helper.py000066400000000000000000000051511467657623600233760ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ map helper functions """ # random_state and random_seed copied from mystic.tools def starargs(f): """decorator to convert a many-arg function to a single-arg function""" func = lambda args: f(*args) #func.__module__ = f.__module__ #func.__name__ = f.__name__ doc = "\nNOTE: all inputs have been compressed into a single argument" if f.__doc__: func.__doc__ = f.__doc__ + doc return func #from functools import update_wrapper #return update_wrapper(func, f) def random_seed(s=None): "sets the seed for calls to 'random()'" import random random.seed(s) try: from numpy import random random.seed(s) except: pass return def random_state(module='random', new=False, seed='!'): """return a (optionally manually seeded) random generator For a given module, return an object that has random number generation (RNG) methods available. If new=False, use the global copy of the RNG object. If seed='!', do not reseed the RNG (using seed=None 'removes' any seeding). If seed='*', use a seed that depends on the process id (PID); this is useful for building RNGs that are different across multiple threads or processes. 
""" import random if module == 'random': rng = random elif not isinstance(module, type(random)): # convienence for passing in 'numpy' if module == 'numpy': module = 'numpy.random' try: import importlib rng = importlib.import_module(module) except ImportError: rng = __import__(module, fromlist=module.split('.')[-1:]) elif module.__name__ == 'numpy': # convienence for passing in numpy from numpy import random as rng else: rng = module _rng = getattr(rng, 'RandomState', None) or \ getattr(rng, 'Random') # throw error if no rng found if new: rng = _rng() if seed == '!': # special case: don't reset the seed return rng if seed == '*': # special case: random seeding for multiprocessing try: import multiprocessing as mp seed = mp.current_process().pid except: seed = 0 import time seed += int(time.time()*1e6) # set the random seed (or 'reset' with None) rng.seed(seed) return rng uqfoundation-pathos-33e3f91/pathos/helpers/pp_helper.py000066400000000000000000000201331467657623600233760ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE try: from multiprocess import TimeoutError from multiprocess.pool import MapResult as _MapResult from multiprocess.pool import ApplyResult as _ApplyResult except ImportError: # fall-back to package distributed with python from multiprocessing import TimeoutError from multiprocessing.pool import MapResult as _MapResult from multiprocessing.pool import ApplyResult as _ApplyResult from ppft import _Task from ppft import Server import time import dill as pickle import threading #FIXME: this is a sloppy kludge... inheritance or not, this is not clean class ApplyResult(_Task): #XXX: better if not derived from _Task? 
"""result object for an 'apply' method in parallelpython enables a pp._Task to mimic the multiprocessing.pool.ApplyResult interface """ #XXX: allow callback etc in __init__ ? def __init__(self, task):# callback=None, callbackargs=(), group='default'): if not isinstance(task, _Task): msg = "a pp._Task (generated by server.submit) is required" raise TypeError(msg) #interface: _Task self.unpickled = False #interface: _ApplyResult self._task = task self._success = True return def ready(self): "Checks if the result is ready" return self.finished def successful(self): "Measures whether result is ready and loaded w/o printing" assert self.ready() if not self.unpickled: self.__unpickle() return self._success def __unpickle(self): """Unpickles the result of the task""" self.result, sout = pickle.loads(self._task.sresult) self.unpickled = True if len(sout) > 0: print(sout, end=' ') #XXX: breaks 2.5 compatibility self._success = False #XXX: we assume sout>0 is an error else: self._success = True #XXX: we assume sout=0 is ok if self.callback: args = self.callbackargs + (self.result, ) self.callback(*args) def wait(self, timeout=None): #XXX: None is blocking """Waits for the task""" if not self.finished: cond = threading.Condition(self._task.lock) #XXX: or need Rlock??? 
cond.acquire() try: if not self._task.finished: cond.wait(timeout) #FIXME: ignores timeout, and blocks finally: cond.release() return def get(self, timeout=None): "Retrieves result of the task" self.wait(timeout) if not self.finished: raise TimeoutError return self.__call__() def __call__(self, raw_result=False): """Retrieves result of the task""" self.wait() if not self.unpickled and not raw_result: self.__unpickle() if raw_result: return self._task.sresult else: return self.result def finalize(self, sresult): """Finalizes the task ***internal use only***""" self._task.sresult = sresult if self.callback: self.__unpickle() self.finished = True #interface: _Task @property def lock(self): return self._task.lock @property def tid(self): return self._task.tid @property def server(self): return self._task.server @property def callback(self): return self._task.callback @property def callbackargs(self): return self._task.callbackargs @property def group(self): return self._task.group @property def finished(self): return self._task.finished pass class MapResult(object): def __init__(self, size, callback=None, callbackargs=(), group='default'): chunksize, length = size #interface: ApplyResult self.callback = callback self.callbackargs = callbackargs self.group = group self.finished = False self.unpickled = False self._success = True #interface: _MapResult self._value = [None] * length self._chunksize = chunksize if chunksize <= 0: self._number_left = 0 self.finished = True else: self._number_left = length//chunksize + bool(length % chunksize) #interface: list self.__queue = () self.__tasks = [] return def finalize(self, *results): # should be a 'sresult' (pickled result) "finalize the tasks ***internal use only***" [task.finalize(result) for (task,result) in zip(self.__tasks,results)] if self.callback: self.__unpickle() #XXX: better known as 'fetch the results' self.finished = True return #FIXME: this is probably broken... needs testing!!! 
def __unpickle(self): """Unpickles the results of the tasks""" if not self.unpickled: self.__queue = list(self.__queue) #XXX: trigger fetch of results self.unpickled = True if self.callback: args = self.callbackargs + (self._value, ) self.callback(*args) def queue(self, *tasks): # expects list of ApplyResult objects "Fill the MapResult with ApplyResult objects" valid = [isinstance(task, ApplyResult) for task in tasks] if not all(valid): tasks = list(tasks) _valid = [isinstance(task, _Task) for task in tasks] if not all(_valid): #XXX: above could be more efficient id = _valid.index(False) msg = "%s is not a pp._Task instance" % tasks[id] raise TypeError(msg) while valid.count(False): ind = valid.index(False) tasks[ind] = ApplyResult(tasks[ind]) valid[ind] = True self.__queue = (self._set(i,task) for (i,task) in enumerate(tasks)) self.__tasks = tasks self.finished = False self.unpickled = False return def __call__(self): """Retrieve the results of the tasks""" self.wait() if not self.unpickled: self.__unpickle() return self._value def wait(self, timeout=None): "Wait for the tasks" if not self.ready(): for task in self.__tasks: task.wait(timeout) #XXX: better one-time timeout or n-time ? if timeout is None: continue timeout = 0 #return self.ready() #XXX: better if return T/F ? #if self.ready(): # self.__unpickle() #XXX: better if callback...? #return def get(self, timeout=None): "Retrieves results of the tasks" self.wait(timeout) if not self.ready(): raise TimeoutError return self.__call__() def ready(self): "Checks if the result is ready" self.finished = all([task.finished for task in self.__tasks]) return self.finished def successful(self): "Measures whether result is ready and loaded w/o printing" assert self.ready() if not self.unpickled: self.__unpickle() return self._success def _set(self, i, task): #XXX: unordered by how fill _value & imap in _set? 
task.wait() success, result = task.successful(), [task.result] if success: self._value[i*self._chunksize:(i+1)*self._chunksize] = result self._number_left -= 1 if self._number_left == 0: self._success = True self.unpickled = True #self.__unpickle() self.finished = True else: self._success = False self.unpickled = True self._value = result print(result, end=' ') #XXX: breaks 2.5 compatibility self.finished = True return task pass # EOF uqfoundation-pathos-33e3f91/pathos/hosts.py000066400000000000000000000024701467657623600211220ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE # # dictionary of known host/profile pairs """ high-level programming interface to pathos host registry """ default_profile = '.bash_profile' _profiles = { } """ For example, to register two 'known' host profiles: _profiles = { \ 'foobar.danse.us':'.profile', \ 'computer.cacr.caltech.edu':'.cshrc', \ } """ def get_profile(rhost, assume=True): '''get the default $PROFILE for a remote host''' if rhost in _profiles: return _profiles[rhost] if assume: return default_profile return def get_profiles(): '''get $PROFILE for each registered host''' return _profiles def register_profiles(profiles): '''add dict of {'host':$PROFILE} to registered host profiles''' #XXX: needs parse checking of input _profiles.update(profiles) return def register(rhost, profile=None): '''register a host and $PROFILE''' if profile == None: profile = default_profile #XXX: needs parse checking of input _profiles[rhost] = profile return # EOF uqfoundation-pathos-33e3f91/pathos/maps.py000066400000000000000000000516111467657623600207230ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns 
(mmckerns @uqfoundation) # Copyright (c) 2022-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ maps: stand-alone map-like objects using lazy pool instantiation """ class Map(object): def __init__(self, pool=None, *args, **kwds): """map instance with internal lazy pool instantiation Args: pool: pool object (i.e. pathos.pools.ProcessPool) *args: positional arguments for pool initialization **kwds: keyword arguments for pool initialization close: if True, close the pool to any new jobs [Default: False] join: if True, reclaim the pool's closed workers [Default: False] clear: if True, delete the pool singleton [Default: False] NOTE: if a pool object is not provided, a builtins.map will be used with the returned iterator cast to a list. NOTE: pools from both multiprocess and pathos.pools can be used, however the behavior is slightly different. Pools from both pathos and multiprocess have close and join methods, to close the pool to new jobs, and to shut down the pool's workers. Pools from pathos, however, are launched as singletons, so they also include a clear method that deletes the singleton. In either case, a pool that has been "closed" will throw a ValueError if map is then called, and similarly, a ValueError will be thrown if join is called before a pool is "closed". The major difference is that if a pathos.pool is closed, the map instance cannot run new jobs until "clear" is called, while a new multiprocess pool will be created each time the map is executed. This leads to pathos.pools generally being called with either ``clear=True`` or ``clear=False``, and pools from multprocess either using ``close=True`` or ``join=True`` or both. Some hierarchical parallel workflows are not allowed, and will result in an error being thrown; however, changing close, join, or clear can often remove the error. 
""" self._clear = kwds.pop('clear', False) self._join = kwds.pop('join', self._clear) self._close = kwds.pop('close', self._join) self.pool = None if pool is map else pool self.args = args self.kwds = kwds self._pool = None def __call__(self, func, *args, **kwds): """instantiate a pool and execute the pool's map Args: func: function object to map *args: positional arguments for map **kwds: keyword arguments for map Returns: results from execution of ``map(func, *args, **kwds)`` NOTE: initializes a new worker pool with each call """ if self.pool is None: #XXX: args, kwds? return list(map(func, *args)) #XXX: iterator or list? self._pool = pool = self.pool(*self.args, **self.kwds) result = pool.map(func, *args, **kwds) if self._close: self.close() if self._join: self.join() if self._clear: self.clear() return result #NOTE: ValueError on non-running pool # function interface def __cls__(self): return self def __meth__(self): return self.__call__.__func__ def __attr__(self): return self.__call__.__get__ __self__ = property(__cls__) __func__ = property(__meth__) __get__ = property(__attr__) def close(self): "close the map to any new jobs" try: self._pool.close() except AttributeError: pass def join(self): #NOTE: ValueError on running pool "reclaim the closed workers" try: self._pool.join() except AttributeError: pass def clear(self): "remove pool singleton, if exists" try: self._pool.clear() except AttributeError: pass def __del__(self): """shutdown the worker pool and tidy up """ try: self.close() self.join() self.clear() #XXX: clear or not? except Exception: pass self._pool = None class Smap(Map): def __init__(self, pool=None, *args, **kwds): """starmap instance with internal lazy pool instantiation Args: pool: pool object (i.e. 
pathos.pools.ProcessPool) *args: positional arguments for pool initialization **kwds: keyword arguments for pool initialization close: if True, close the pool to any new jobs [Default: False] join: if True, reclaim the pool's closed workers [Default: False] clear: if True, delete the pool singleton [Default: False] NOTE: if a pool object is not provided, an itertools.starmap will be used with the returned iterator cast to a list. NOTE: pools from both multiprocess and pathos.pools can be used, however the behavior is slightly different. Pools from both pathos and multiprocess have close and join methods, to close the pool to new jobs, and to shut down the pool's workers. Pools from pathos, however, are launched as singletons, so they also include a clear method that deletes the singleton. In either case, a pool that has been "closed" will throw a ValueError if map is then called, and similarly, a ValueError will be thrown if join is called before a pool is "closed". The major difference is that if a pathos.pool is closed, the map instance cannot run new jobs until "clear" is called, while a new multiprocess pool will be created each time the map is executed. This leads to pathos.pools generally being called with either ``clear=True`` or ``clear=False``, and pools from multprocess either using ``close=True`` or ``join=True`` or both. Some hierarchical parallel workflows are not allowed, and will result in an error being thrown; however, changing close, join, or clear can often remove the error. """ super().__init__(pool, *args, **kwds) def __call__(self, func, *args, **kwds): """instantiate a pool and execute the pool's starmap Args: func: function object to map *args: positional arguments for starmap **kwds: keyword arguments for starmap Returns: results from execution of ``starmap(func, *args, **kwds)`` NOTE: initializes a new worker pool with each call """ if self.pool is None: #XXX: args, kwds? 
from itertools import starmap return list(starmap(func, *args)) #XXX: iterator or list? self._pool = pool = self.pool(*self.args, **self.kwds) smap = getattr(pool, 'smap', getattr(pool, 'starmap', None)) if smap is None: result = pool.map(lambda x: func(*x), *args, **kwds) else: result = smap(func, *args, **kwds) if self._close: self.close() if self._join: self.join() if self._clear: self.clear() return result class Imap(Map): def __init__(self, pool=None, *args, **kwds): """map iterator with internal lazy pool instantiation Args: pool: pool object (i.e. pathos.pools.ProcessPool) *args: positional arguments for pool initialization **kwds: keyword arguments for pool initialization close: if True, close the pool to any new jobs [Default: False] join: if True, reclaim the pool's closed workers [Default: False] clear: if True, delete the pool singleton [Default: False] NOTE: if a pool object is not provided, a builtins.map will be used. NOTE: pools from both multiprocess and pathos.pools can be used, however the behavior is slightly different. Pools from both pathos and multiprocess have close and join methods, to close the pool to new jobs, and to shut down the pool's workers. Pools from pathos, however, are launched as singletons, so they also include a clear method that deletes the singleton. In either case, a pool that has been "closed" will throw a ValueError if map is then called, and similarly, a ValueError will be thrown if join is called before a pool is "closed". The major difference is that if a pathos.pool is closed, the map instance cannot run new jobs until "clear" is called, while a new multiprocess pool will be created each time the map is executed. This leads to pathos.pools generally being called with either ``clear=True`` or ``clear=False``, and pools from multprocess either using ``close=True`` or ``join=True`` or both. 
Some hierarchical parallel workflows are not allowed, and will result in an error being thrown; however, changing close, join, or clear can often remove the error. """ super().__init__(pool, *args, **kwds) def __call__(self, func, *args, **kwds): """instantiate a pool and execute the pool's map iterator Args: func: function object to map *args: positional arguments for map iterator **kwds: keyword arguments for map iterator Returns: results from execution of ``map(func, *args, **kwds)`` iterator NOTE: initializes a new worker pool with each call """ if self.pool is None: #XXX: args, kwds? return map(func, *args) self._pool = pool = self.pool(*self.args, **self.kwds) imap = getattr(pool, 'imap', None) if imap is None: #NOTE: should not happen return NotImplemented result = imap(func, *args, **kwds) if self._close: self.close() if self._join: self.join() if self._clear: self.clear() return result class Amap(Map): def __init__(self, pool=None, *args, **kwds): """async map instance with internal lazy pool instantiation Args: pool: pool object (i.e. pathos.pools.ProcessPool) *args: positional arguments for pool initialization **kwds: keyword arguments for pool initialization close: if True, close the pool to any new jobs [Default: False] join: if True, reclaim the pool's closed workers [Default: False] clear: if True, delete the pool singleton [Default: False] NOTE: if a pool object is not provided, NotImplemented is returned upon use. NOTE: pools from both multiprocess and pathos.pools can be used, however the behavior is slightly different. Pools from both pathos and multiprocess have close and join methods, to close the pool to new jobs, and to shut down the pool's workers. Pools from pathos, however, are launched as singletons, so they also include a clear method that deletes the singleton. 
In either case, a pool that has been "closed" will throw a ValueError if map is then called, and similarly, a ValueError will be thrown if join is called before a pool is "closed". The major difference is that if a pathos.pool is closed, the map instance cannot run new jobs until "clear" is called, while a new multiprocess pool will be created each time the map is executed. This leads to pathos.pools generally being called with either ``clear=True`` or ``clear=False``, and pools from multprocess either using ``close=True`` or ``join=True`` or both. Some hierarchical parallel workflows are not allowed, and will result in an error being thrown; however, changing close, join, or clear can often remove the error. """ super().__init__(pool, *args, **kwds) def __call__(self, func, *args, **kwds): """instantiate a pool and execute the pool's async map Args: func: function object to map *args: positional arguments for async map **kwds: keyword arguments for async map Returns: results from execution of async ``map(func, *args, **kwds)`` NOTE: initializes a new worker pool with each call """ if self.pool is None: #XXX: args, kwds? return NotImplemented self._pool = pool = self.pool(*self.args, **self.kwds) amap = getattr(pool, 'amap', getattr(pool, 'map_async', None)) if amap is None: #NOTE: should not happen return NotImplemented result = amap(func, *args, **kwds) if self._close: self.close() if self._join: self.join() if self._clear: self.clear() return result class Asmap(Map): def __init__(self, pool=None, *args, **kwds): """async starmap instance with internal lazy pool instantiation Args: pool: pool object (i.e. 
pathos.pools.ProcessPool) *args: positional arguments for pool initialization **kwds: keyword arguments for pool initialization close: if True, close the pool to any new jobs [Default: False] join: if True, reclaim the pool's closed workers [Default: False] clear: if True, delete the pool singleton [Default: False] NOTE: if a pool object is not provided, NotImplemented is returned upon use. NOTE: pools from both multiprocess and pathos.pools can be used, however the behavior is slightly different. Pools from both pathos and multiprocess have close and join methods, to close the pool to new jobs, and to shut down the pool's workers. Pools from pathos, however, are launched as singletons, so they also include a clear method that deletes the singleton. In either case, a pool that has been "closed" will throw a ValueError if map is then called, and similarly, a ValueError will be thrown if join is called before a pool is "closed". The major difference is that if a pathos.pool is closed, the map instance cannot run new jobs until "clear" is called, while a new multiprocess pool will be created each time the map is executed. This leads to pathos.pools generally being called with either ``clear=True`` or ``clear=False``, and pools from multprocess either using ``close=True`` or ``join=True`` or both. Some hierarchical parallel workflows are not allowed, and will result in an error being thrown; however, changing close, join, or clear can often remove the error. """ super().__init__(pool, *args, **kwds) def __call__(self, func, *args, **kwds): """instantiate a pool and execute the pool's async starmap Args: func: function object to map *args: positional arguments for async starmap **kwds: keyword arguments for async starmap Returns: results from execution of async ``starmap(func, *args, **kwds)`` NOTE: initializes a new worker pool with each call """ if self.pool is None: #XXX: args, kwds? 
return NotImplemented self._pool = pool = self.pool(*self.args, **self.kwds) asmap = getattr(pool, 'asmap', getattr(pool, 'starmap_async', None)) if asmap is None: result = pool.amap(lambda x: func(*x), *args, **kwds) else: result = asmap(func, *args, **kwds) if self._close: self.close() if self._join: self.join() if self._clear: self.clear() return result class Uimap(Map): def __init__(self, pool=None, *args, **kwds): """unordered map iterator with internal lazy pool instantiation Args: pool: pool object (i.e. pathos.pools.ProcessPool) *args: positional arguments for pool initialization **kwds: keyword arguments for pool initialization close: if True, close the pool to any new jobs [Default: False] join: if True, reclaim the pool's closed workers [Default: False] clear: if True, delete the pool singleton [Default: False] NOTE: if a pool object is not provided, NotImplemented is returned upon use. NOTE: pools from both multiprocess and pathos.pools can be used, however the behavior is slightly different. Pools from both pathos and multiprocess have close and join methods, to close the pool to new jobs, and to shut down the pool's workers. Pools from pathos, however, are launched as singletons, so they also include a clear method that deletes the singleton. In either case, a pool that has been "closed" will throw a ValueError if map is then called, and similarly, a ValueError will be thrown if join is called before a pool is "closed". The major difference is that if a pathos.pool is closed, the map instance cannot run new jobs until "clear" is called, while a new multiprocess pool will be created each time the map is executed. This leads to pathos.pools generally being called with either ``clear=True`` or ``clear=False``, and pools from multprocess either using ``close=True`` or ``join=True`` or both. Some hierarchical parallel workflows are not allowed, and will result in an error being thrown; however, changing close, join, or clear can often remove the error. 
""" super().__init__(pool, *args, **kwds) def __call__(self, func, *args, **kwds): """instantiate a pool and execute the pool's unordered map iterator Args: func: function object to map *args: positional arguments for unordered map iterator **kwds: keyword arguments for unordered map iterator Returns: results from execution of unordered ``map(func, *args, **kwds)`` iterator NOTE: initializes a new worker pool with each call """ if self.pool is None: #XXX: args, kwds? return NotImplemented self._pool = pool = self.pool(*self.args, **self.kwds) uimap = getattr(pool, 'uimap', getattr(pool, 'imap_unordered', None)) if uimap is None: #NOTE: should not happen return NotImplemented result = uimap(func, *args, **kwds) if self._close: self.close() if self._join: self.join() if self._clear: self.clear() return result class Ismap(Map): def __init__(self, pool=None, *args, **kwds): """starmap iterator with internal lazy pool instantiation Args: pool: pool object (i.e. pathos.pools.ProcessPool) *args: positional arguments for pool initialization **kwds: keyword arguments for pool initialization close: if True, close the pool to any new jobs [Default: False] join: if True, reclaim the pool's closed workers [Default: False] clear: if True, delete the pool singleton [Default: False] NOTE: if a pool object is not provided, an itertools.starmap will be used. NOTE: pools from both multiprocess and pathos.pools can be used, however the behavior is slightly different. Pools from both pathos and multiprocess have close and join methods, to close the pool to new jobs, and to shut down the pool's workers. Pools from pathos, however, are launched as singletons, so they also include a clear method that deletes the singleton. In either case, a pool that has been "closed" will throw a ValueError if map is then called, and similarly, a ValueError will be thrown if join is called before a pool is "closed". 
The major difference is that if a pathos.pool is closed, the map instance cannot run new jobs until "clear" is called, while a new multiprocess pool will be created each time the map is executed. This leads to pathos.pools generally being called with either ``clear=True`` or ``clear=False``, and pools from multprocess either using ``close=True`` or ``join=True`` or both. Some hierarchical parallel workflows are not allowed, and will result in an error being thrown; however, changing close, join, or clear can often remove the error. """ super().__init__(pool, *args, **kwds) def __call__(self, func, *args, **kwds): """instantiate a pool and execute the pool's starmap iterator Args: func: function object to map *args: positional arguments for starmap iterator **kwds: keyword arguments for starmap iterator Returns: results from execution of ``starmap(func, *args, **kwds)`` iterator NOTE: initializes a new worker pool with each call """ if self.pool is None: #XXX: args, kwds? from itertools import starmap return starmap(func, *args) self._pool = pool = self.pool(*self.args, **self.kwds) ismap = getattr(pool, 'ismap', None) if ismap is None: result = pool.imap(lambda x: func(*x), *args, **kwds) else: result = ismap(func, *args, **kwds) if self._close: self.close() if self._join: self.join() if self._clear: self.clear() return result uqfoundation-pathos-33e3f91/pathos/mp_map.py000066400000000000000000000055651467657623600212430ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ minimal interface to python's multiprocessing module Notes: This module has been deprecated in favor of ``pathos.pools``. 
""" from pathos.multiprocessing import ProcessPool, __STATE from pathos.threading import ThreadPool #XXX: thread __STATE not imported from pathos.helpers import cpu_count mp = ProcessPool() #FIXME: don't do this tp = ThreadPool() #FIXME: don't do this __all__ = ['mp_map'] # backward compatibility #FIXME: deprecated... and buggy! (fails to dill on imap/uimap) def mp_map(function, sequence, *args, **kwds): '''extend python's parallel map function to multiprocessing Args: function - target function sequence - sequence to process in parallel nproc - number of 'local' cpus to use [defaut = 'autodetect'] type - processing type ['blocking', 'non-blocking', 'unordered'] threads - if True, use threading instead of multiprocessing ''' processes = cpu_count() proctype = 'blocking' threads = False if 'nproc' in kwds: processes = kwds['nproc'] kwds.pop('nproc') # provide a default that is not a function call if processes == None: processes = cpu_count() if 'type' in kwds: proctype = kwds['type'] kwds.pop('type') if 'threads' in kwds: threads = kwds['threads'] kwds.pop('threads') # remove all the junk kwds that are added due to poor design! 
if 'nnodes' in kwds: kwds.pop('nnodes') if 'nodes' in kwds: kwds.pop('nodes') if 'launcher' in kwds: kwds.pop('launcher') if 'mapper' in kwds: kwds.pop('mapper') if 'queue' in kwds: kwds.pop('queue') if 'timelimit' in kwds: kwds.pop('timelimit') if 'scheduler' in kwds: kwds.pop('scheduler') if 'ncpus' in kwds: kwds.pop('ncpus') if 'servers' in kwds: kwds.pop('servers') if proctype in ['blocking']: if not threads: return mp.map(function,sequence,*args,**kwds) else: return tp.map(function,sequence,*args,**kwds) elif proctype in ['unordered']: if not threads: return mp.uimap(function,sequence,*args,**kwds) else: return tp.uimap(function,sequence,*args,**kwds) elif proctype in ['non-blocking', 'ordered']: if not threads: return mp.imap(function,sequence,*args,**kwds) else: return tp.imap(function,sequence,*args,**kwds) # default if not threads: return mp.map(function,sequence,*args,**kwds) else: return tp.map(function,sequence,*args,**kwds) if __name__ == '__main__': pass uqfoundation-pathos-33e3f91/pathos/multiprocessing.py000066400000000000000000000240121467657623600232050ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ This module contains map and pipe interfaces to python's multiprocessing module. 
Pipe methods provided: pipe - blocking communication pipe [returns: value] apipe - asynchronous communication pipe [returns: object] Map methods provided: map - blocking and ordered worker pool [returns: list] imap - non-blocking and ordered worker pool [returns: iterator] uimap - non-blocking and unordered worker pool [returns: iterator] amap - asynchronous worker pool [returns: object] Usage ===== A typical call to a pathos multiprocessing map will roughly follow this example: >>> # instantiate and configure the worker pool >>> from pathos.multiprocessing import ProcessPool >>> pool = ProcessPool(nodes=4) >>> >>> # do a blocking map on the chosen function >>> print(pool.map(pow, [1,2,3,4], [5,6,7,8])) >>> >>> # do a non-blocking map, then extract the results from the iterator >>> results = pool.imap(pow, [1,2,3,4], [5,6,7,8]) >>> print("...") >>> print(list(results)) >>> >>> # do an asynchronous map, then get the results >>> results = pool.amap(pow, [1,2,3,4], [5,6,7,8]) >>> while not results.ready(): ... time.sleep(5); print(".", end=' ') ... >>> print(results.get()) >>> >>> # do one item at a time, using a pipe >>> print(pool.pipe(pow, 1, 5)) >>> print(pool.pipe(pow, 2, 6)) >>> >>> # do one item at a time, using an asynchronous pipe >>> result1 = pool.apipe(pow, 1, 5) >>> result2 = pool.apipe(pow, 2, 6) >>> print(result1.get()) >>> print(result2.get()) Notes ===== This worker pool leverages the python's multiprocessing module, and thus has many of the limitations associated with that module. The function f and the sequences in args must be serializable. The maps in this worker pool have full functionality whether run from a script or in the python interpreter, and work reliably for both imported and interactively-defined functions. Unlike python's multiprocessing module, pathos.multiprocessing maps can directly utilize functions that require multiple arguments. """ __all__ = ['ProcessPool','_ProcessPool'] #FIXME: probably not good enough... 
should store each instance with a uid __STATE = _ProcessPool__STATE = {} from pathos.abstract_launcher import AbstractWorkerPool from pathos.helpers.mp_helper import starargs as star from pathos.helpers import cpu_count, freeze_support, ProcessPool as Pool import warnings import sys OLD312a7 = (sys.hexversion < 0x30c00a7) # 'forward' compatibility _ProcessPool = Pool class ProcessPool(AbstractWorkerPool): """ Mapper that leverages python's multiprocessing. """ def __init__(self, *args, **kwds): """\nNOTE: if number of nodes is not given, will autodetect processors. \nNOTE: additional keyword input is optional, with: id - identifier for the pool initializer - function that takes no input, called when node is spawned initargs - tuple of args for initializers that have args maxtasksperchild - int that limits the max number of tasks per node """ hasnodes = 'nodes' in kwds; arglen = len(args) if 'ncpus' in kwds and (hasnodes or arglen): msg = "got multiple values for keyword argument 'ncpus'" raise TypeError(msg) elif hasnodes: #XXX: multiple try/except is faster? 
if arglen: msg = "got multiple values for keyword argument 'nodes'" raise TypeError(msg) kwds['ncpus'] = kwds.pop('nodes') elif arglen: kwds['ncpus'] = args[0] if 'processes' in kwds: if 'ncpus' in kwds: msg = "got multiple values for keyword argument 'processes'" raise TypeError(msg) kwds['ncpus'] = kwds.pop('processes') self.__nodes = kwds.pop('ncpus', cpu_count()) # Create an identifier for the pool self._id = kwds.pop('id', None) #'pool' if self._id is None: self._id = self.__nodes self._kwds = kwds # Create a new server if one isn't already initialized self._serve() return if AbstractWorkerPool.__init__.__doc__: __init__.__doc__ = AbstractWorkerPool.__init__.__doc__ + __init__.__doc__ #def __exit__(self, *args): # self._clear() # return def _serve(self, nodes=None): #XXX: should be STATE method; use id """Create a new server if one isn't already initialized""" if nodes is None: nodes = self.__nodes _pool = __STATE.get(self._id, None) if not _pool or nodes != _pool.__nodes or self._kwds != _pool._kwds: self._clear() _pool = Pool(nodes, **self._kwds) _pool.__nodes = nodes _pool._kwds = self._kwds __STATE[self._id] = _pool return _pool def _clear(self): #XXX: should be STATE method; use id """Remove server with matching state""" _pool = __STATE.get(self._id, None) if _pool and self.__nodes == _pool.__nodes and self._kwds == _pool._kwds: _pool.close() _pool.join() __STATE.pop(self._id, None) return #XXX: return _pool? 
clear = _clear def map(self, f, *args, **kwds): AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds) _pool = self._serve() with warnings.catch_warnings(): if not OLD312a7: warnings.filterwarnings('ignore', category=DeprecationWarning) return _pool.map(star(f), zip(*args), **kwds) map.__doc__ = AbstractWorkerPool.map.__doc__ def imap(self, f, *args, **kwds): AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds) _pool = self._serve() with warnings.catch_warnings(): if not OLD312a7: warnings.filterwarnings('ignore', category=DeprecationWarning) return _pool.imap(star(f), zip(*args), **kwds) imap.__doc__ = AbstractWorkerPool.imap.__doc__ def uimap(self, f, *args, **kwds): AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds) _pool = self._serve() with warnings.catch_warnings(): if not OLD312a7: warnings.filterwarnings('ignore', category=DeprecationWarning) return _pool.imap_unordered(star(f), zip(*args), **kwds) uimap.__doc__ = AbstractWorkerPool.uimap.__doc__ def amap(self, f, *args, **kwds): # register a callback ? AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds) _pool = self._serve() with warnings.catch_warnings(): if not OLD312a7: warnings.filterwarnings('ignore', category=DeprecationWarning) return _pool.map_async(star(f), zip(*args), **kwds) amap.__doc__ = AbstractWorkerPool.amap.__doc__ ######################################################################## # PIPES def pipe(self, f, *args, **kwds): #AbstractWorkerPool._AbstractWorkerPool__pipe(self, f, *args, **kwds) _pool = self._serve() with warnings.catch_warnings(): if not OLD312a7: warnings.filterwarnings('ignore', category=DeprecationWarning) return _pool.apply(f, args, kwds) pipe.__doc__ = AbstractWorkerPool.pipe.__doc__ def apipe(self, f, *args, **kwds): # register a callback ? 
#AbstractWorkerPool._AbstractWorkerPool__apipe(self, f, *args, **kwds) _pool = self._serve() with warnings.catch_warnings(): if not OLD312a7: warnings.filterwarnings('ignore', category=DeprecationWarning) return _pool.apply_async(f, args, kwds) apipe.__doc__ = AbstractWorkerPool.apipe.__doc__ ######################################################################## def __repr__(self): mapargs = (self.__class__.__name__, self.ncpus) return "" % mapargs def __get_nodes(self): """get the number of nodes used in the map""" return self.__nodes def __set_nodes(self, nodes): """set the number of nodes used in the map""" self._serve(nodes) self.__nodes = nodes return ######################################################################## def restart(self, force=False): "restart a closed pool" _pool = __STATE.get(self._id, None) if _pool and self.__nodes == _pool.__nodes and self._kwds == _pool._kwds: RUN = 0 if not force: assert _pool._state != RUN # essentially, 'clear' and 'serve' self._clear() _pool = Pool(self.__nodes, **self._kwds) _pool.__nodes = self.__nodes _pool._kwds = self._kwds __STATE[self._id] = _pool return _pool def close(self): "close the pool to any new jobs" _pool = __STATE.get(self._id, None) if _pool and self.__nodes == _pool.__nodes: _pool.close() return def terminate(self): "a more abrupt close" _pool = __STATE.get(self._id, None) if _pool and self.__nodes == _pool.__nodes: _pool.terminate() return def join(self): "cleanup the closed worker processes" _pool = __STATE.get(self._id, None) if _pool and self.__nodes == _pool.__nodes: _pool.join() return # interface ncpus = property(__get_nodes, __set_nodes) nodes = property(__get_nodes, __set_nodes) __state__ = __STATE pass # backward compatibility from pathos.helpers import ThreadPool from pathos.threading import ThreadPool as ThreadingPool ProcessingPool = ProcessPool # EOF 
uqfoundation-pathos-33e3f91/pathos/parallel.py000066400000000000000000000421631467657623600215610ustar00rootroot00000000000000#!/usr/bin/env python # # Based on code by Kirk Strauser # Rev: 1139; Date: 2008-04-16 # (see license text in pathos.pp_map) # # Forked by: Mike McKerns (April 2008) # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2008-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE # # Modified to meet the pathos pool API """ This module contains map and pipe interfaces to the parallelpython (pp) module. Pipe methods provided: pipe - blocking communication pipe [returns: value] apipe - asynchronous communication pipe [returns: object] Map methods provided: map - blocking and ordered worker pool [returns: list] imap - non-blocking and ordered worker pool [returns: iterator] uimap - non-blocking and unordered worker pool [returns: iterator] amap - asynchronous worker pool [returns: object] Usage ===== A typical call to a pathos pp map will roughly follow this example: >>> # instantiate and configure the worker pool >>> from pathos.pp import ParallelPool >>> pool = ParallelPool(nodes=4) >>> >>> # do a blocking map on the chosen function >>> print(pool.map(pow, [1,2,3,4], [5,6,7,8])) >>> >>> # do a non-blocking map, then extract the results from the iterator >>> results = pool.imap(pow, [1,2,3,4], [5,6,7,8]) >>> print("...") >>> print(list(results)) >>> >>> # do an asynchronous map, then get the results >>> results = pool.amap(pow, [1,2,3,4], [5,6,7,8]) >>> while not results.ready(): ... time.sleep(5); print(".", end=' ') ... 
>>> print(results.get()) >>> >>> # do one item at a time, using a pipe >>> print(pool.pipe(pow, 1, 5)) >>> print(pool.pipe(pow, 2, 6)) >>> >>> # do one item at a time, using an asynchronous pipe >>> result1 = pool.apipe(pow, 1, 5) >>> result2 = pool.apipe(pow, 2, 6) >>> print(result1.get()) >>> print(result2.get()) Notes ===== This worker pool leverages the parallelpython (pp) module, and thus has many of the limitations associated with that module. The function f and the sequences in args must be serializable. The maps in this worker pool have full functionality when run from a script, but may be somewhat limited when used in the python interpreter. Both imported and interactively-defined functions in the interpreter session may fail due to the pool failing to find the source code for the target function. For a work-around, try: >>> # instantiate and configure the worker pool >>> from pathos.pp import ParallelPool >>> pool = ParallelPool(nodes=4) >>> >>> # wrap the function, so it can be used interactively by the pool >>> def wrapsin(*args, **kwds): >>> from math import sin >>> return sin(*args, **kwds) >>> >>> # do a blocking map using the wrapped function >>> results = pool.map(wrapsin, [1,2,3,4,5]) """ __all__ = ['ParallelPool', 'stats'] from pathos.helpers import parallelpython as pp from pathos.helpers import cpu_count import builtins #FIXME: probably not good enough... should store each instance with a uid __STATE = _ParallelPool__STATE = {} def __print_stats(servers=None): "print stats from the pp.Server" FROM_STATE = True if servers is None: servers = list(__STATE.values()) else: FROM_STATE = False try: servers = tuple(servers) except TypeError: servers = (servers,) if not servers: msg = '; no active' if FROM_STATE else ' for the requested' print("Stats are not available%s servers.\n" % msg) return for server in servers: # fails if not pp.Servers #XXX: also print ids? (__STATE.keys())? server.print_stats() return #XXX: better return object(?) to query? 
| is per run? compound? def stats(pool=None): "return a string containing stats response from the pp.Server" server = None if pool is None else __STATE.get(pool._id, tuple()) import io import sys stdout = sys.stdout try: sys.stdout = result = io.StringIO() __print_stats(server) except: result = None #XXX: better throw an error? sys.stdout = stdout result = result.getvalue() if result else '' return result from pathos.abstract_launcher import AbstractWorkerPool from pathos.helpers.pp_helper import ApplyResult, MapResult #XXX: should look into parallelpython for 'cluster computing' class ParallelPool(AbstractWorkerPool): """ Mapper that leverages parallelpython (i.e. pp) maps. """ def __init__(self, *args, **kwds): """\nNOTE: if number of nodes is not given, will autodetect processors. \nNOTE: if a tuple of servers is not provided, defaults to localhost only. \nNOTE: additional keyword input is optional, with: id - identifier for the pool servers - tuple of pp.Servers """ hasnodes = 'nodes' in kwds; arglen = len(args) if 'ncpus' in kwds and (hasnodes or arglen): msg = "got multiple values for keyword argument 'ncpus'" raise TypeError(msg) elif hasnodes: #XXX: multiple try/except is faster? 
if arglen: msg = "got multiple values for keyword argument 'nodes'" raise TypeError(msg) kwds['ncpus'] = kwds.pop('nodes') elif arglen: kwds['ncpus'] = args[0] self.__nodes = None self.__servers = () ncpus = kwds.get('ncpus', None) #servers = kwds.get('servers', ('*',)) # autodetect servers = kwds.get('servers', ()) # only localhost if servers is None: servers = () #from _ppserver_config import ppservers as servers # config file # Create an identifier for the pool self._id = kwds.get('id', None) #'server' if self._id is None: from numbers import Integral _nodes = str(ncpus) if isinstance(ncpus, Integral) else '*' self._id = '@'.join([_nodes, '+'.join(sorted(servers))]) #XXX: throws 'socket.error' when starting > 1 server with autodetect # Create a new server if one isn't already initialized # ...and set the requested level of multi-processing self._exiting = False _pool = self._serve(nodes=ncpus, servers=servers) #XXX: or register new UID for each instance? #_pool.set_ncpus(ncpus or 'autodetect') # no ncpus=0 #print("configure %s local workers" % _pool.get_ncpus()) return if AbstractWorkerPool.__init__.__doc__: __init__.__doc__ = AbstractWorkerPool.__init__.__doc__ + __init__.__doc__ #def __exit__(self, *args): # self._clear() # return def _serve(self, nodes=None, servers=None): #XXX: is a STATE method; use id """Create a new server if one isn't already initialized""" # get nodes and servers in form used by pp.Server if nodes is None: nodes = self.nodes #XXX: autodetect must be explicit if nodes in ['*']: nodes = 'autodetect' if servers is None: servers = tuple(sorted(self.__servers)) # no servers is () elif servers in ['*', 'autodetect']: servers = ('*',) # if no server, create one _pool = __STATE.get(self._id, None) if not _pool: _pool = pp.Server(ppservers=servers) # convert to form returned by pp.Server, then compare _auto = [('*',)] if _pool.auto_ppservers else [] _servers = sorted(_pool.ppservers + _auto) _servers = tuple(':'.join((str(i) for i in tup)) for 
tup in _servers) if servers != _servers: #XXX: assume servers specifies ports if desired _pool = pp.Server(ppservers=servers) # convert to form returned by pp.Server, then compare _nodes = cpu_count() if nodes=='autodetect' else nodes if _nodes != _pool.get_ncpus(): _pool.set_ncpus(nodes) # allows ncpus=0 # set (or 'repoint') the server __STATE[self._id] = _pool # set the 'self' internals self.__nodes = None if nodes in ['autodetect'] else nodes self.__servers = servers return _pool def _clear(self): #XXX: should be STATE method; use id """Remove server with matching state""" _pool = __STATE.get(self._id, None) if not self._equals(_pool): return # it's the 'same' (better to check _pool.secret?) _pool.destroy() __STATE.pop(self._id, None) self._exiting = False return #XXX: return _pool? clear = _clear def map(self, f, *args, **kwds): AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds) return list(self.imap(f, *args)) # chunksize map.__doc__ = AbstractWorkerPool.map.__doc__ def imap(self, f, *args, **kwds): AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds) def submit(*argz): """send a job to the server""" _pool = self._serve() #print("using %s local workers" % _pool.get_ncpus()) try: return _pool.submit(f, argz, globals=globals()) except pp.DestroyedServerError: self._is_alive(None) # submit all jobs, then collect results as they become available return (subproc() for subproc in list(builtins.map(submit, *args))) # chunksize imap.__doc__ = AbstractWorkerPool.imap.__doc__ def uimap(self, f, *args, **kwds): AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds) def submit(*argz): """send a job to the server""" _pool = self._serve() #print("using %s local workers" % _pool.get_ncpus()) try: return _pool.submit(f, argz, globals=globals()) except pp.DestroyedServerError: self._is_alive(None) def imap_unordered(it): """build a unordered map iterator""" it = list(it) while len(it): for i,job in enumerate(it): if job.finished: 
yield it.pop(i)() break # yield it.pop(0).get() # wait for the first element? # *subprocess* # alternately, loop in a subprocess return #raise StopIteration # submit all jobs, then collect results as they become available return imap_unordered(builtins.map(submit, *args)) # chunksize uimap.__doc__ = AbstractWorkerPool.uimap.__doc__ def amap(self, f, *args, **kwds): AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds) def submit(*argz): """send a job to the server""" _pool = self._serve() #print("using %s local workers" % _pool.get_ncpus()) try: return _pool.submit(f, argz, globals=globals()) except pp.DestroyedServerError: self._is_alive(None) override = True if 'size' in kwds else False elem_size = kwds.pop('size', 2) length = min(len(task) for task in args) args = zip(*args) #XXX: zip iterator ok? or should be list? # submit all jobs, to be collected later with 'get()' tasks = [submit(*task) for task in args] tasks = [ApplyResult(task) for task in tasks] # build a correctly sized results object nodes = self.nodes if self.nodes in ['*','autodetect',None]: _pool = self._serve() nodes = _pool.get_ncpus() #NOTE: local only #nodes = _pool.get_active_nodes() #XXX: ppft version? #nodes = min(j for (i,j) in nodes.items() if i != 'local') if not nodes: nodes = 1 # try to quickly find a small chunksize that gives good results maxsize = 2**62 #XXX: HOPEFULLY, this will never be reached... 
chunksize = 1 # chunksize while chunksize < maxsize: chunksize, extra = divmod(length, nodes * elem_size) if override: break # the user *wants* to override this loop if extra >= length: break # we found something that 'works' elem_size = elem_size * 2 if extra: chunksize += 1 m = MapResult((chunksize,length)) # queue the tasks m.queue(*tasks) return m amap.__doc__ = AbstractWorkerPool.amap.__doc__ ######################################################################## # PIPES def pipe(self, f, *args, **kwds): #AbstractWorkerPool._AbstractWorkerPool__pipe(self, f, *args, **kwds) # submit a job to the server, and block until results are collected _pool = self._serve() try: task = _pool.submit(f, args, globals=globals()) except pp.DestroyedServerError: self._is_alive(None) return task() pipe.__doc__ = AbstractWorkerPool.pipe.__doc__ def apipe(self, f, *args, **kwds): # register a callback ? #AbstractWorkerPool._AbstractWorkerPool__apipe(self, f, *args, **kwds) # submit a job, to be collected later with 'get()' _pool = self._serve() try: task = _pool.submit(f, args, globals=globals()) except pp.DestroyedServerError: self._is_alive(None) return ApplyResult(task) apipe.__doc__ = AbstractWorkerPool.apipe.__doc__ ######################################################################## def __repr__(self): mapargs = (self.__class__.__name__, self.ncpus, self.servers) return "" % mapargs def __get_nodes(self): """get the number of nodes used in the map""" nodes = self.__nodes if nodes == None: nodes = '*' return nodes def __set_nodes(self, nodes): """set the number of nodes used in the map""" if nodes is None: nodes = 'autodetect' self._serve(nodes=nodes) return def __get_servers(self): """get the servers used in the map""" servers = self.__servers if servers == (): servers = None elif servers == ('*',): servers = '*' return servers def __set_servers(self, servers): """set the servers used in the map""" if servers is None: servers = () self._serve(servers=servers) 
#__STATE[self._id].ppservers == [(s.split(':')[0],int(s.split(':')[1])) for s in servers] # we could check if the above is true... for now we will just be lazy # we could also convert lists to tuples... again, we'll be lazy # XXX: throws "socket error" when autodiscovery service is enabled return ######################################################################## def restart(self, force=False): "restart a closed pool" _pool = __STATE.get(self._id, None) if self._equals(_pool): if not force: self._is_alive(_pool, negate=True) # 'clear' and 'serve' if self._exiting: # i.e. is destroyed self._clear() else: # only closed, so just 'reopen' _pool._exiting = False #NOTE: setting pool._exiting = False *may* hang python! _pool = self._serve() return def _is_alive(self, server=None, negate=False, run=True): RUN,CLOSE,TERMINATE = 0,1,2 pool = lambda :None if server is None: pool._state = RUN if negate else CLOSE else: pool._state = server._exiting if negate and run: # throw error if alive (exiting=True) assert pool._state != RUN elif negate: # throw error if alive (exiting=True) assert pool._state in (CLOSE, TERMINATE) else: # throw error if not alive (exiting=False) raise ValueError("Pool not running") def _equals(self, server): "check if the server is compatible" if not server: return False _nodes = cpu_count() if self.__nodes is None else self.__nodes if _nodes != server.get_ncpus(): return False _auto = [('*',)] if server.auto_ppservers else [] _servers = sorted(server.ppservers + _auto) _servers = [':'.join((str(i) for i in tup)) for tup in _servers] return sorted(self.__servers) == _servers def close(self): "close the pool to any new jobs" _pool = __STATE.get(self._id, None) if self._equals(_pool): _pool._exiting = True return def terminate(self): "a more abrupt close" self.close() self.join() return def join(self): "cleanup the closed worker processes" _pool = __STATE.get(self._id, None) if self._equals(_pool): self._is_alive(_pool, negate=True, run=False) 
_pool.destroy() self._exiting = True # i.e. is destroyed return # interface ncpus = property(__get_nodes, __set_nodes) nodes = property(__get_nodes, __set_nodes) servers = property(__get_servers, __set_servers) __state__ = __STATE pass # backward compatibility ParallelPythonPool = ParallelPool # EOF uqfoundation-pathos-33e3f91/pathos/pools.py000066400000000000000000000023521467657623600211150ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ pools: pools of pathos workers, providing map and pipe constructs """ def _clear(type=None): "destroy all cached pools (of the given type)" pools = (ProcessPool, ThreadPool, ParallelPool, SerialPool) _pools = (_ProcessPool, _ThreadPool) #pools += _pools if type is None: for pool in pools: pool.__state__.clear() elif type in pools: type.__state__.clear() elif type in _pools: msg = "use the close() method to shutdown" raise NotImplementedError(msg) else: msg = "'%s' is not one of the pathos.pools" % type raise TypeError(msg) return from pathos.helpers import ProcessPool as _ProcessPool from pathos.helpers import ThreadPool as _ThreadPool from pathos.multiprocessing import ProcessPool from pathos.threading import ThreadPool from pathos.parallel import ParallelPool from pathos.serial import SerialPool uqfoundation-pathos-33e3f91/pathos/portpicker.py000077500000000000000000000036301467657623600221460ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE # # this script prints out an available port number. # adapted from J. Kim & M. McKerns utility functions """ This script prints out an available port number. """ class portnumber(object): '''port selector Usage: >>> pick = portnumber(min=1024,max=65535) >>> print( pick() ) ''' def __init__(self, min=0, max=64*1024): '''select a port number from a given range. The first call will return a random number from the available range, and each subsequent call will return the next number in the range. Args: min -- minimum port number [default = 0] max -- maximum port number [default = 65536] ''' self.min = min self.max = max self.first = -1 self.current = -1 return def __call__(self): import random if self.current < 0: #first call self.current = random.randint(self.min, self.max) self.first = self.current return self.current else: self.current += 1 if self.current > self.max: self.current = self.min if self.current == self.first: raise RuntimeError( 'Range exhausted' ) return self.current return def randomport(min=1024, max=65536): '''select a random port number Args: min -- minimum port number [default = 1024] max -- maximum port number [default = 65536] ''' return portnumber(min, max)() if __name__ == '__main__': pick = portnumber(min=1024,max=65535) print( pick() ) # End of file uqfoundation-pathos-33e3f91/pathos/pp.py000066400000000000000000000012631467657623600204000ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2008-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE # """ ``pathos`` interface to the ``pp`` (parallel python) module. Notes: This module has been deprecated in favor of ``pathos.parallel``. 
""" # backward compatibility __all__ = ['ParallelPythonPool', 'stats'] from pathos.parallel import __doc__, __print_stats, __STATE from pathos.parallel import * ParallelPythonPool = ParallelPool # EOF uqfoundation-pathos-33e3f91/pathos/pp_map.py000077500000000000000000000166751467657623600212550ustar00rootroot00000000000000#!/usr/bin/env python # Based on code by Kirk Strauser # Rev: 1139; Date: 2008-04-16 # (also see code in pathos.pp) # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials # provided with the distribution. # # * Neither the name of Kirk Strauser nor the names of other # contributors may be used to endorse or promote products # derived from this software without specific prior written # permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
# Forked by: Mike McKerns (April 2008) # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2008-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ minimal interface to python's ``pp`` (parallel python) module Implements a work-alike of the builtin ``map`` function that distributes work across many processes. As it uses ``ppft`` to do the actual parallel processing, code using this must conform to the usual ``ppft`` restrictions (arguments must be serializable, etc). Notes: This module has been deprecated in favor of ``pathos.pools``. """ from pathos.pp import __STATE, stats, __print_stats as print_stats #from pathos.pp import ParallelPythonPool as Pool from pathos.helpers.pp_helper import Server as ppServer def ppmap(processes, function, sequence, *sequences): """Split the work of 'function' across the given number of processes. Set 'processes' to None to let Parallel Python autodetect the number of children to use. Although the calling semantics should be identical to __builtin__.map (even using __builtin__.map to process arguments), it differs in that it returns a generator instead of a list. This enables lazy evaluation of the results so that other work can be done while the subprocesses are still running. 
>>> def rangetotal(n): return n, sum(range(n)) >>> list(map(rangetotal, range(1, 6))) [(1, 0), (2, 1), (3, 3), (4, 6), (5, 10)] >>> list(ppmap(1, rangetotal, range(1, 6))) [(1, 0), (2, 1), (3, 3), (4, 6), (5, 10)] """ ppservers = ("*",) # autodetect #from _ppserver_config import ppservers # read from a config file # Create a new server if one isn't already initialized if not __STATE.get('server', None): __STATE['server'] = ppServer(ppservers=ppservers) #class dill_wrapper(object): # """handle non-picklable functions by wrapping with dill""" # def __init__(self, function): # from dill import dumps # self.pickled_function = dumps(function) # def __call__(self, *args): # from dill import loads #XXX: server now requires dill # f = loads(self.pickled_function) # return f(*args) # def dill_wrapper(function): # """handle non-picklable functions by wrapping with dill""" # from dill import dumps # pickled_function = dumps(function) # def unwrap(*args): # from dill import loads #XXX: server now requires dill # f = loads(pickled_function) # return f(*args) # return unwrap def submit(*args): #XXX: needs **kwds to allow "depfuncs, modules, ...? """Send a job to the server""" #print globals()['ncalls'] #FIXME: ncalls not in globals() #XXX: options for submit... 
#XXX: func -- function to be executed #XXX: depfuncs -- functions called from 'func' #XXX: modules -- modules to import #XXX: callback -- callback function to be called after 'func' completes #XXX: callbackargs -- additional args for callback(result, *args) #XXX: group -- allows naming of 'job group' to use in wait(group) #XXX: globals -- dictionary from which everything imports # from mystic.tools import wrap_function, wrap_bounds # return __STATE['server'].submit(function, args, \ # depfuncs=(wrap_function,wrap_bounds), \ ## modules=("mystic","numpy"), \ # globals=globals()) # p_function = dill_wrapper(function) # return __STATE['server'].submit(p_function, args, globals=globals()) #print __STATE['server'].get_ncpus(), "local workers" #XXX: debug return __STATE['server'].submit(function, args, globals=globals()) # Merge all the passed-in argument lists together. This is done # that way because as with the map() function, at least one list # is required but the rest are optional. a = [sequence] a.extend(sequences) # Set the requested level of multi-processing #__STATE['server'].set_ncpus(processes or 'autodetect') # never processes=0 if processes == None: __STATE['server'].set_ncpus('autodetect') else: __STATE['server'].set_ncpus(processes) # allow processes=0 #print "running with", __STATE['server'].get_ncpus(), "local workers" #XXX: debug # First, submit all the jobs. Then harvest the results as they # come available. 
return (subproc() for subproc in map(submit, *a)) def pp_map(function, sequence, *args, **kwds): '''extend python's parallel map function to parallel python Args: function - target function sequence - sequence to process in parallel ncpus - number of 'local' processors to use [defaut = 'autodetect'] servers - available distributed parallel python servers [default = ()] ''' procs = None servers = () if 'ncpus' in kwds: procs = kwds['ncpus'] kwds.pop('ncpus') if 'servers' in kwds: servers = kwds['servers'] kwds.pop('servers') # remove all the junk kwds that are added due to poor design! if 'nnodes' in kwds: kwds.pop('nnodes') if 'nodes' in kwds: kwds.pop('nodes') if 'launcher' in kwds: kwds.pop('launcher') if 'mapper' in kwds: kwds.pop('mapper') if 'queue' in kwds: kwds.pop('queue') if 'timelimit' in kwds: kwds.pop('timelimit') if 'scheduler' in kwds: kwds.pop('scheduler') # return Pool(procs, servers=servers).map(function, sequence, *args, **kwds) if not __STATE.get('server',None): __STATE['server'] = job_server = ppServer(ppservers=servers) return list(ppmap(procs,function,sequence,*args)) if __name__ == '__main__': # code moved to "pathos/examples/pp_map.py pass # EOF uqfoundation-pathos-33e3f91/pathos/profile.py000066400000000000000000000335721467657623600214310ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ This module contains functions for profiling in other threads and processes. 
Functions for identifying a thread/process: process_id - get the identifier (process id) for the current process thread_id - get the identifier for the current thread Functions for controlling profiling: enable_profiling - initialize a profiler in the current thread/process start_profiling - begin profiling everything in the current thread/process stop_profiling - stop profiling everything in the current thread/process disable_profiling - remove the profiler from the current thread/process Functions that control profile statstics (pstats) output clear_stats - clear stored pstats from the current thread/process get_stats - get stored pstats for the current thread/process print_stats - print stored pstats for the current thread/process dump_stats - dump stored pstats for the current thread/process Functions that add/remove profiling: profiled - decorator to add profiling to a function not_profiled - decorator to remove profiling from a function profile - decorator for profiling a function (will enable_profiling) Usage ===== Typical calls to pathos profiling will roughly follow this example:: >>> import time >>> import random >>> import pathos.profile as pr >>> >>> # build a worker function >>> def _work(i): ... x = random.random() ... time.sleep(x) ... return (i,x) >>> >>> # generate a 'profiled' work function >>> config = dict(gen=pr.process_id) >>> work = pr.profiled(**config)(_work) >>> >>> # enable profiling >>> pr.enable_profiling() >>> >>> # profile the work (not the map internals) in the main process >>> for i in map(work, range(-10,0)): ... print(i) ... >>> # profile the map in the main process, and work in the other process >>> from pathos.helpers import mp >>> pool = mp.Pool(10) >>> _uimap = pr.profiled(**config)(pool.imap_unordered) >>> for i in _uimap(work, range(-10,0)): ... print(i) ... 
>>> # deactivate all profiling >>> pr.disable_profiling() # in the main process >>> tuple(_uimap(pr.disable_profiling, range(10))) # in the workers >>> for i in _uimap(work, range(-20,-10)): ... print(i) ... >>> # re-activate profiling >>> pr.enable_profiling() >>> >>> # print stats for profile of 'import math' in another process >>> def test_import(module): ... __import__(module) ... >>> import pathos.pools as pp >>> pool = pp.ProcessPool(1) >>> pr.profile('cumulative', pipe=pool.pipe)(test_import, 'pox') 10 function calls in 0.003 seconds Ordered by: cumulative time ncalls tottime percall cumtime percall filename:lineno(function) 1 0.000 0.000 0.003 0.003 :1(test_import) 1 0.002 0.002 0.003 0.003 {__import__} 1 0.001 0.001 0.001 0.001 __init__.py:8() 1 0.000 0.000 0.000 0.000 shutils.py:11() 1 0.000 0.000 0.000 0.000 _disk.py:15() 1 0.000 0.000 0.000 0.000 {eval} 1 0.000 0.000 0.000 0.000 utils.py:11() 1 0.000 0.000 0.000 0.000 :1() 1 0.000 0.000 0.000 0.000 info.py:2() 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects} >>> pool.close() >>> pool.join() >>> pool.clear() Notes ===== This module leverages the python's cProfile module, and is primarily a high-level interface to that module that strives to make profiling in a different thread or process easier. The use of pathos.pools are suggested, however are not required (as seen in the example above). In many cases, profiling in another thread is not necessary, and either of the following can be sufficient/better for timing and profiling:: $ python -c "import time; s=time.time(); import pathos; print (time.time()-s)" $ python -c "import cProfile; p=cProfile.Profile(); p.enable(); import pathos; p.print_stats('cumulative')" This module was inspired by: http://stackoverflow.com/a/32522579/4646678. 
""" # module-level handle to the profiler instance for the current thread/process profiler = None def process_id(): "get the identifier (process id) for the current process" from pathos.helpers import mp return mp.current_process().pid def thread_id(): "get the identifier for the current thread" import threading as th return th.current_thread().ident class profiled(object): "decorator for profiling a function (does not call enable profiling)" def __init__(self, gen=None, prefix='id-', suffix='.prof'): """y=gen(), with y an indentifier (e.g. current_process().pid) Important class members: prefix - string prefix for pstats filename [default: 'id-'] suffix - string suffix for pstats filename [default: '.prof'] pid - function for obtaining id of current process/thread sort - integer index of column in pstats output for sorting Example: >>> import time >>> import random >>> import pathos.profile as pr >>> >>> config = dict(gen=pr.process_id) >>> @pr.profiled(**config) ... def work(i): ... x = random.random() ... time.sleep(x) ... return (i,x) ... >>> pr.enable_profiling() >>> # profile the work (not the map internals); write to file for pstats >>> for i in map(work, range(-10,0)): ... print(i) ... NOTE: If gen is a bool or string, then sort=gen and pid is not used. Otherwise, pid=gen and sort is not used. 
Output can be ordered by setting gen to one of the following: 'calls' - call count 'cumulative' - cumulative time 'cumtime' - cumulative time 'file' - file name 'filename' - file name 'module' - file name 'ncalls' - call count 'pcalls' - primitive call count 'line' - line number 'name' - function name 'nfl' - name/file/line 'stdname' - standard name 'time' - internal time 'tottime' - internal time """ self.prefix = prefix self.suffix= suffix #XXX: tricky: if gen is bool/str then print, else dump with gen=id_gen if (type(gen) is str) or (repr(gen) in ('True','False')): self.sort = -1 if (repr(gen) in ('True','False')) else gen self.pid = str else: self.sort = -1 self.pid = process_id if gen is None else gen def __call__(self, f): def proactive(*args, **kwds): try: profiler.enable() doit = True except AttributeError: doit = False except NameError: doit = False res = f(*args, **kwds) if doit: profiler.disable() # XXX: option to not dump? if self.pid is str: profiler.print_stats(self.sort) else: profiler.dump_stats('%s%s%s' % (self.prefix,self.pid(),self.suffix)) return res proactive.__wrapped__ = f #XXX: conflicts with other __wrapped__ ? return proactive def not_profiled(f): "decorator to remove profiling (due to 'profiled') from a function" if getattr(f, '__name__', None) == 'proactive': _f = getattr(f, '__wrapped__', f) else: _f = f def wrapper(*args, **kwds): return _f(*args, **kwds) return wrapper def enable_profiling(*args): #XXX: args ignored (needed for use in map) "initialize a profiler instance in the current thread/process" global profiler #XXX: better profiler[0] or dict? 
import cProfile profiler = cProfile.Profile() #XXX: access at: pathos.profile.profiler return def start_profiling(*args): "begin profiling everything in the current thread/process" if profiler is None: enable_profiling() try: profiler.enable() except AttributeError: pass except NameError: pass return def stop_profiling(*args): "stop profiling everything in the current thread/process" try: profiler.disable() except AttributeError: pass except NameError: pass return def disable_profiling(*args): "remove the profiler instance from the current thread/process" global profiler if profiler is not None: stop_profiling() globals().pop('profiler', None) profiler = None return def clear_stats(*args): "clear all stored profiling results from the current thread/process" try: profiler.clear() except AttributeError: pass except NameError: pass return def get_stats(*args): "get all stored profiling results for the current thread/process" try: res = profiler.getstats() except AttributeError: pass except NameError: pass return res def print_stats(*args, **kwds): #kwds=dict(sort=-1) "print all stored profiling results for the current thread/process" sort = kwds.get('sort', -1) try: profiler.print_stats(sort) except AttributeError: pass except NameError: pass return def dump_stats(*args, **kwds): # kwds=dict(gen=None, prefix='id-', suffix='.prof')) """dump all stored profiling results for the current thread/process Notes: see ``pathos.profile.profiled`` for settings for ``*args`` and ``**kwds`` """ config = dict(gen=None, prefix='id-', suffix='.prof') config.update(kwds) prefix = config['prefix'] suffix= config['suffix'] pid = config['gen'] pid = process_id if pid is None else pid #XXX: default is str?? 
file = '%s%s%s' % (prefix, pid(), suffix) try: profiler.dump_stats(file) except AttributeError: pass except NameError: pass return class profile(object): "decorator for profiling a function (will enable profiling)" def __init__(self, sort=None, **config): """sort is integer index of column in pstats output for sorting Important class members: pipe - pipe instance in which profiling is active Example: >>> import time >>> import random >>> import pathos.profile as pr >>> ... def work(): ... x = random.random() ... time.sleep(x) ... return x ... >>> # profile the work; print pstats info >>> pr.profile()(work) 4 function calls in 0.136 seconds Ordered by: standard name ncalls tottime percall cumtime percall filename:lineno(function) 1 0.000 0.000 0.136 0.136 :1(work) 1 0.000 0.000 0.000 0.000 {method 'disable' of '_lsprof.Profiler' objects} 1 0.000 0.000 0.000 0.000 {method 'random' of '_random.Random' objects} 1 0.136 0.136 0.136 0.136 {time.sleep} 0.1350568110491419 >>> NOTE: pipe provided should come from pool built with nodes=1. Other configuration keywords (config) are passed to 'pr.profiled'. 
Output can be ordered by setting sort to one of the following: 'calls' - call count 'cumulative' - cumulative time 'cumtime' - cumulative time 'file' - file name 'filename' - file name 'module' - file name 'ncalls' - call count 'pcalls' - primitive call count 'line' - line number 'name' - function name 'nfl' - name/file/line 'stdname' - standard name 'time' - internal time 'tottime' - internal time """ pipe = config.pop('pipe', None) if (sort is not None) and (repr(sort) not in ('True','False')): config.update(dict(gen=sort)) self.config = dict(gen=False) if not bool(config) else config from pathos.pools import SerialPool if pipe is None: self._pool = SerialPool() self.pipe = self._pool.pipe else: self.pipe = pipe self._pool = getattr(pipe, '__self__', SerialPool()) if self._pool.nodes != 1: raise ValueError('pipe must draw from a pool with only one node') return def __call__(self, function, *args, **kwds): #self._pool.nodes, nodes = 1, self._pool.nodes #XXX: skip this? self.pipe(enable_profiling, None) result = self.pipe(profiled(**self.config)(function), *args, **kwds) self.pipe(disable_profiling, None) #self._pool.nodes = nodes #XXX: skip this? return result """ def _enable_profiling(f): #FIXME: gradual: only applied to *new* workers "activate profiling for the given function in the current thread" def func(*arg, **kwd): enable_profiling() #XXX: include f under profiler or above? return f(*arg, **kwd) # func.__wrapped__ = f #XXX: conflict with other usings __wrapped__ return func def _disable_profiling(f): #FIXME: gradual: only applied to *new* workers "deactivate profiling for the given function in the current thread" try: _f = f.__wrapped__ except AttributeError: _f = f def func(*arg, **kwd): disable_profiling() #XXX: include f under profiler or above? 
return _f(*arg, **kwd) func.__wrapped__ = _f return func def profiling(pool): "decorator for initializing profiling functions called within a pool" def wrapper(*args, **kwds): initializer = kwds.get('initializer', None) pool._rinitializer = initializer if initializer is None: initializer = lambda *x,**y: (x,y) kwds['initializer'] = _enable_profiling(initializer) return pool(*args, **kwds) return wrapper """ # EOF uqfoundation-pathos-33e3f91/pathos/python.py000066400000000000000000000012061467657623600212770ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2008-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE # """ ``pathos`` interface to python's (serial) ``map`` functions Notes: This module has been deprecated in favor of ``pathos.serial``. """ # backward compatibility __all__ = ['PythonSerial'] from pathos.serial import __doc__, __STATE from pathos.serial import * PythonSerial = SerialPool # EOF uqfoundation-pathos-33e3f91/pathos/secure/000077500000000000000000000000001467657623600206735ustar00rootroot00000000000000uqfoundation-pathos-33e3f91/pathos/secure/__init__.py000066400000000000000000000006771467657623600230160ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE from .connection import Pipe from .copier import Copier from .tunnel import Tunnel, TunnelException uqfoundation-pathos-33e3f91/pathos/secure/connection.py000066400000000000000000000104211467657623600234020ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE # # adapted from Mike McKerns' and June Kim's gsl SSHLauncher class """ This module contains the derived class for secure shell (ssh) launchers See the following for an example. Usage ===== A typical call to a 'ssh pipe' will roughly follow this example: >>> # instantiate the pipe, providing it with a unique identifier >>> pipe = Pipe('launcher') >>> >>> # configure the pipe to perform the command on the selected host >>> pipe(command='hostname', host='remote.host.edu') >>> >>> # execute the launch and retrieve the response >>> pipe.launch() >>> print(pipe.response()) """ __all__ = ['Pipe'] from pathos.connection import Pipe as _Pipe # broke backward compatability: 30/05/14 ==> replace base-class almost entirely class Pipe(_Pipe): '''a popen-based ssh-pipe for parallel and distributed computing.''' def __init__(self, name=None, **kwds): '''create a ssh pipe Inputs: name: a unique identifier (string) for the pipe host: hostname to recieve command [user@host is also valid] command: a command to send [default = 'echo '] launcher: remote service mechanism (i.e. ssh, rsh) [default = 'ssh'] options: remote service options (i.e. 
-v, -N, -L) [default = ''] background: run in background [default = False] decode: ensure response is 'ascii' [default = True] stdin: file-like object to serve as standard input for the remote process ''' self.launcher = kwds.pop('launcher', 'ssh') self.options = kwds.pop('options', '') self.host = kwds.pop('host', 'localhost') super(Pipe, self).__init__(name, **kwds) return def config(self, **kwds): '''configure a remote command using given keywords: (Re)configure the copier for the following inputs: host: hostname to recieve command [user@host is also valid] command: a command to send [default = 'echo '] launcher: remote service mechanism (i.e. ssh, rsh) [default = 'ssh'] options: remote service options (i.e. -v, -N, -L) [default = ''] background: run in background [default = False] decode: ensure response is 'ascii' [default = True] stdin: file-like object to serve as standard input for the remote process ''' if self.message is None: self.message = 'echo %s' % self.name #' '? else: # pare back down to 'command' # better, just save _command? 
if self.launcher: self.message = self.message.split(self.launcher, 1)[-1] if self.options: self.message = self.message.split(self.options, 1)[-1] if self.host: self.message = self.message.split(self.host, 1)[-1].strip() quote = ('"',"'") if self.message.startswith(quote) or self.message.endswith(quote): self.message = self.message[1:-1] if self.stdin is None: import sys self.stdin = sys.stdin for key, value in kwds.items(): if key == 'command': self.message = value elif key == 'host': self.host = value elif key == 'launcher': self.launcher = value elif key == 'options': self.options = value elif key == 'background': self.background = value elif key == 'decode': self.codec = value elif key == 'stdin': self.stdin = value self._stdout = None self.message = '%s %s %s "%s"' % (self.launcher, self.options, self.host, self.message) names=['message','host','launcher','options','background','stdin','codec'] return dict((i,getattr(self, i)) for i in names) # interface __call__ = config pass if __name__ == '__main__': pass # End of file uqfoundation-pathos-33e3f91/pathos/secure/copier.py000066400000000000000000000101421467657623600225240ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE # # adapted from Mike McKerns' gsl SCPLauncher class """ This module contains the derived class for launching secure copy (scp) commands. See the following for an example. 
Usage ===== A typical call to a 'scp launcher' will roughly follow this example: >>> # instantiate the launcher, providing it with a unique identifier >>> copier = Copier('copier') >>> >>> # configure and launch the copy to the selected destination >>> copier(source='~/foo.txt', destination='remote.host.edu:~') >>> copier.launch() >>> >>> # configure and launch the copied file to a new destination >>> copier(source='remote.host.edu:~/foo.txt', destination='.') >>> copier.launch() >>> print(copier.response()) """ __all__ = ['FileNotFound','Copier'] class FileNotFound(Exception): '''Exception for improper source or destination format''' pass from pathos.connection import Pipe as _Pipe # broke backward compatability: 30/05/14 ==> replace base-class almost entirely class Copier(_Pipe): '''a popen-based copier for parallel and distributed computing.''' def __init__(self, name=None, **kwds): '''create a copier Inputs: name: a unique identifier (string) for the launcher source: hostname:path of original [user@host:path is also valid] destination: hostname:path for copy [user@host:path is also valid] launcher: remote service mechanism (i.e. scp, cp) [default = 'scp'] options: remote service options (i.e. -v, -P) [default = ''] background: run in background [default = False] decode: ensure response is 'ascii' [default = True] stdin: file-like object to serve as standard input for the remote process ''' self.launcher = kwds.pop('launcher', 'scp') self.options = kwds.pop('options', '') self.source = kwds.pop('source', '.') self.destination = kwds.pop('destination', '.') super(Copier, self).__init__(name, **kwds) return def config(self, **kwds): '''configure the copier using given keywords: (Re)configure the copier for the following inputs: source: hostname:path of original [user@host:path is also valid] destination: hostname:path for copy [user@host:path is also valid] launcher: remote service mechanism (i.e. scp, cp) [default = 'scp'] options: remote service options (i.e. 
-v, -P) [default = ''] background: run in background [default = False] decode: ensure response is 'ascii' [default = True] stdin: file-like object to serve as standard input for the remote process ''' if self.stdin is None: import sys self.stdin = sys.stdin for key, value in kwds.items(): if key == 'command': raise KeyError('command') elif key == 'source': # if quoted, can be multiple sources self.source = value elif key == 'destination': self.destination = value elif key == 'launcher': self.launcher = value elif key == 'options': self.options = value elif key == 'background': self.background = value elif key == 'decode': self.codec = value elif key == 'stdin': self.stdin = value self._stdout = None self.message = '%s %s %s %s' % (self.launcher, self.options, self.source, self.destination) names=['source','destination','launcher','options','background','stdin','codec'] return dict((i,getattr(self, i)) for i in names) # interface __call__ = config pass if __name__ == '__main__': pass # End of file uqfoundation-pathos-33e3f91/pathos/secure/tunnel.py000066400000000000000000000120231467657623600225500ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE # # adapted from J. Kim & M. McKerns' Tunnel class """ This module contains the base class for secure tunnel connections, and describes the pathos tunnel interface. See the following for an example. 
Usage ===== A typical call to a pathos 'tunnel' will roughly follow this example: >>> # instantiate the tunnel, providing it with a unique identifier >>> tunnel = Tunnel('tunnel') >>> >>> # establish a tunnel to the remote host and port >>> remotehost = 'remote.host.edu' >>> remoteport = 12345 >>> localport = tunnel.connect(remotehost, remoteport) >>> print("Tunnel connected at local port: %s" % tunnel._lport) >>> >>> # pause script execution to maintain the tunnel (i.e. do something) >>> sys.stdin.readline() >>> >>> # tear-down the tunneled connection >>> tunnel.disconnect() """ __all__ = ['Tunnel','TunnelException'] import os import signal import random import string from pathos.secure import Pipe class TunnelException(Exception): '''Exception for failure to establish ssh tunnel''' pass class Tunnel(object): """a ssh-tunnel launcher for parallel and distributed computing.""" #MINPORT = 49152 MINPORT = 1024 MAXPORT = 65535 verbose = True def connect(self, host, port=None, through=None): '''establish a secure shell tunnel between local and remote host Input: host -- remote hostname [user@host:path is also valid] port -- remote port number Additional Input: through -- 'tunnel-through' hostname [default = None] ''' from pathos.portpicker import portnumber if port is None: from pathos.core import randomport port = randomport(through) if through else randomport(host) pick = portnumber(self.MINPORT, self.MAXPORT) while True: localport = pick() if localport < 0: raise TunnelException('No available local port') #print('Trying port %d...' % localport) try: self._connect(localport, host, port, through=through) #print('SSH tunnel %d:%s:%d' % (localport, host, port)) except TunnelException as e: # breaks 2.5 compatibility if e.args[0] == 'bind': self.disconnect() continue else: self.__disconnect() raise TunnelException('Connection failed') self.connected = True return localport def disconnect(self): '''destroy the ssh tunnel''' #FIXME: grep (?) 
for self._launcher.message, then kill the pid if self._pid > 0: if self.verbose: print('Kill ssh pid=%d' % self._pid) os.kill(self._pid, signal.SIGTERM) os.waitpid(self._pid, 0) self.__disconnect() return def __disconnect(self): '''disconnect tunnel internals''' self._pid = 0 self.connected = False self._lport = None self._rport = None self._host = None return def __init__(self, name=None, **kwds): '''create a ssh tunnel launcher Inputs: name -- a unique identifier (string) for the launcher ''' xyz = string.ascii_letters self.name = ''.join(random.choice(xyz) for i in range(16)) \ if name is None else name self._launcher = Pipe('launcher') self.__disconnect() if kwds: self.connect(**kwds) return def __repr__(self): if not self.connected: return "Tunnel('%s')" % self.name try: msg = self._launcher.message.split(' ',1)[-1].rstrip('"').rstrip() except: msg = self._launcher.message return "Tunnel('%s')" % msg def _connect(self, localport, remotehost, remoteport, through=None): options = '-q -N -L %d:%s:%d' % (localport, remotehost, remoteport) command = '' if through: rhost = through else: rhost = remotehost self._launcher(host=rhost, command=command, options=options, background=True) #XXX: MMM #options=options, background=False) self._launcher.launch() self._lport = localport self._rport = remoteport self._host = rhost self._pid = self._launcher.pid() #FIXME: should be tunnel_pid [pid()+1] line = self._launcher.response() if line: if line.startswith('bind'): raise TunnelException('bind') else: print(line) raise TunnelException('failure') return if __name__ == '__main__': pass # End of file uqfoundation-pathos-33e3f91/pathos/selector.py000066400000000000000000000135621467657623600216060ustar00rootroot00000000000000#!/usr/bin/env python # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ # # Michael A.G. 
"""
This module implements a selector class, which can be used to dispatch
events and for event handler wrangling.
"""

class Selector(object):
    """
Selector object for watching and event notification.
    """

    def watch(self, timeout=None):
        """dispatch events to the registered handlers

        timeout: optional select timeout in seconds; replaces the default
        """
        if timeout: self._timeout = timeout
        self._watch()
        return
        # NOTE(review): everything below this return is unreachable; the
        # original author parked this exception-wrapping variant here on
        # purpose (see the FIXME) -- do not delete without confirming
        # FIXME:
        # leave like this until I understand better the set of exceptions I
        # would like to handle. It really is bad to catch all exceptions,
        # especially since it hides errors during development
        try:
            self._watch()
        except: # catch all exceptions
            self._cleanup()
            # get exception information
            import sys
            # NOTE(review): 'type' shadows the builtin here
            type, value = sys.exc_info()[:2]
            # rethrow the exception so the clients can handle it
            raise type(value)
        return

    def notifyOnReadReady(self, fd, handler):
        """add to the list of routines to call when fd is read ready"""
        self._input.setdefault(fd, []).append(handler)
        return

    def notifyOnWriteReady(self, fd, handler):
        """add to the list of routines to call when fd is write ready"""
        self._output.setdefault(fd, []).append(handler)
        return

    def notifyOnException(self, fd, handler):
        """add to the list of routines to call when fd raises an exception"""
        self._exception.setdefault(fd, []).append(handler)
        return

    def notifyOnInterrupt(self, handler):
        """add to the list of routines to call when a signal arrives"""
        self._interrupt.append(handler)
        return

    def notifyWhenIdle(self, handler):
        """add to the list of routines to call when a timeout occurs"""
        self._idle.append(handler)
        return

    def __init__(self):
        """
Takes no initial input.
        """
        # while True, the _watch loop keeps running
        self.state = True
        self._timeout = self._TIMEOUT
        # the fd activity clients: fd -> list of handlers
        self._input = {}
        self._output = {}
        self._exception = {}
        # clients to notify when there is nothing else to do
        self._idle = []
        self._interrupt = []
        return

    def _watch(self):
        # main event loop: select on all registered fds until no handlers
        # remain or self.state is flipped to False
        import select
        while self.state:
            self._debug.debug("constructing list of watchers")
            iwtd = list(self._input.keys())
            owtd = list(self._output.keys())
            ewtd = list(self._exception.keys())
            self._debug.debug("input: %s" % iwtd)
            self._debug.debug("output: %s" % owtd)
            self._debug.debug("exception: %s" % ewtd)
            self._debug.debug("checking for indefinite block")
            if not iwtd and not owtd and not ewtd and not self._idle:
                # nothing left to watch; select would block forever
                self._debug.info("no registered handlers left; exiting")
                return
            self._debug.debug("calling select")
            try:
                reads, writes, excepts = select.select(iwtd, owtd, ewtd,
                                                       self._timeout)
            except select.error as error: # breaks 2.5 compatibility
                # GUESS:
                # when a signal is delivered to a signal handler registered
                # by the application, the select call is interrupted and
                # raises a select.error
                errno, msg = error.args
                self._debug.info("signal received: %d: %s" % (errno, msg))
                continue
            self._debug.debug("returned from select")
            # dispatch to the idle handlers if this was a timeout
            if not reads and not writes and not excepts:
                self._debug.info("no activity; dispatching to idle handlers")
                # NOTE(review): handlers are removed from the list being
                # iterated when they return falsy -- pre-existing behavior
                for handler in self._idle:
                    if not handler(self):
                        self._idle.remove(handler)
            else:
                # dispatch to the registered handlers
                self._debug.info("dispatching to exception handlers")
                self._dispatch(self._exception, excepts)
                self._debug.info("dispatching to output handlers")
                self._dispatch(self._output, writes)
                self._debug.info("dispatching to input handlers")
                self._dispatch(self._input, reads)
        return

    def _dispatch(self, handlers, entities):
        # call each handler registered for each ready fd; a handler that
        # returns falsy is deregistered, and fds with no handlers are dropped
        for fd in entities:
            for handler in handlers[fd]:
                if not handler(self, fd):
                    handlers[fd].remove(handler)
            if not handlers[fd]:
                del handlers[fd]
        return

    def _cleanup(self):
        # close all watched fds and notify the interrupt handlers
        self._debug.info("cleaning up")
        for fd in self._input: fd.close()
        for fd in self._output: fd.close()
        for fd in self._exception: fd.close()
        for handler in self._interrupt:
            handler(self)
        return

    # static members
    from pathos import logger
    _debug = logger(name="pathos.selector", level=30) # logging.WARN
    del logger

    # constants
    _TIMEOUT = .5

# End of file
"""
This module contains map and pipe interfaces to standard (i.e. serial) python.

Pipe methods provided:
    pipe        - blocking communication pipe             [returns: value]

Map methods provided:
    map         - blocking and ordered worker pool        [returns: list]
    imap        - non-blocking and ordered worker pool    [returns: iterator]


Usage
=====

A typical call to a pathos python map will roughly follow this example:

    >>> # instantiate and configure the worker pool
    >>> from pathos.serial import SerialPool
    >>> pool = SerialPool()
    >>>
    >>> # do a blocking map on the chosen function
    >>> print(pool.map(pow, [1,2,3,4], [5,6,7,8]))
    >>>
    >>> # do a non-blocking map, then extract the results from the iterator
    >>> results = pool.imap(pow, [1,2,3,4], [5,6,7,8])
    >>> print("...")
    >>> print(list(results))
    >>>
    >>> # do one item at a time, using a pipe
    >>> print(pool.pipe(pow, 1, 5))
    >>> print(pool.pipe(pow, 2, 6))


Notes
=====

This worker pool leverages the built-in python maps, and thus does not have
limitations due to serialization of the function f or the sequences in args.
The maps in this worker pool have full functionality whether run from a script
or in the python interpreter, and work reliably for both imported and
interactively-defined functions.

"""
__all__ = ['SerialPool']

from pathos.abstract_launcher import AbstractWorkerPool
# unmangle the base class's private node accessors for reuse below
__get_nodes__ = AbstractWorkerPool._AbstractWorkerPool__get_nodes
__set_nodes__ = AbstractWorkerPool._AbstractWorkerPool__set_nodes

from builtins import map as _map
_apply = lambda f, args, kwds: f(*args, **kwds)
_imap = _map #XXX: good for interface... or bad idea?

# module-level registry shared as the pool's __state__
__STATE = _SerialPool__STATE = {}

#FIXME: in python3.x mp.map returns a list, mp.imap an iterator
class SerialPool(AbstractWorkerPool):
    """
Mapper that leverages standard (i.e. serial) python maps.
    """
    # interface (no __init__)

    # True once the pool has been closed/terminated
    _exiting = False

    def map(self, f, *args, **kwds):
        #AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds)
        if self._exiting: self._is_alive()
        return _map(f, *args)#, **kwds) # chunksize
    map.__doc__ = AbstractWorkerPool.map.__doc__
    def imap(self, f, *args, **kwds):
        #AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
        if self._exiting: self._is_alive()
        return _imap(f, *args)#, **kwds) # chunksize
    imap.__doc__ = AbstractWorkerPool.imap.__doc__
    ########################################################################
    # PIPES
    def pipe(self, f, *args, **kwds):
        #AbstractWorkerPool._AbstractWorkerPool__pipe(self, f, *args, **kwds)
        if self._exiting: self._is_alive()
        return _apply(f, args, kwds)
    pipe.__doc__ = AbstractWorkerPool.pipe.__doc__
    #XXX: generator/yield provides simple ipipe? apipe? what about coroutines?
    ########################################################################
    def restart(self, force=False):
        "restart a closed pool"
        if not force and not self._exiting:
            self._is_alive(negate=True)
        # 'clear' and 'serve'
        if self._exiting: # i.e. is destroyed
            self.clear()
        return
    def _is_alive(self, negate=False, run=True):
        # mimic multiprocessing.Pool's state checks on a throwaway stand-in;
        # NOTE(review): every branch of this method raises -- it exists only
        # to produce the error matching the pool's (closed/running) state
        RUN,CLOSE,TERMINATE = 0,1,2
        pool = lambda :None
        pool._state = RUN if negate else CLOSE
        if negate and run: # throw error if alive (exiting=True)
            assert pool._state != RUN
        elif negate: # throw error if alive (exiting=True)
            assert pool._state in (CLOSE, TERMINATE)
        else: # throw error if not alive (exiting=False)
            raise ValueError("Pool not running")
    def close(self):
        "close the pool to any new jobs"
        self._exiting = True
        return
    def terminate(self):
        "a more abrupt close"
        self.close()
        self.join()
        return
    def join(self):
        "cleanup the closed worker processes"
        if not self._exiting:
            self._is_alive(negate=True, run=False)
        self._exiting = True
        return
    def clear(self):
        """hard restart"""
        self._exiting = False
        return
    ########################################################################
    # interface
    __get_nodes = __get_nodes__
    __set_nodes = __set_nodes__
    nodes = property(__get_nodes, __set_nodes)
    __state__ = __STATE
    pass

# backward compatibility
PythonSerial = SerialPool

# EOF
If a third-party RPC server is selected, such as 'parallel python' (i.e. 'pp') or 'RPyC', direct calls to the third-party interface are currently used. """ __all__ = ['Server'] class Server(object): """ Server base class for pathos servers for parallel and distributed computing. """ def selector(self): """get the selector""" return self._selector def deactivate(self): """turn off the selector""" self._selector.state = False return def activate(self, onTimeout=None, selector=None): """configure the selector and install the timeout callback""" if selector is None: from pathos.selector import Selector selector = Selector() if onTimeout is not None: selector.notifyWhenIdle(onTimeout) self._selector = selector return def serve(self, timeout): """begin serving, and set the timeout""" self._selector.watch(timeout) return def __init__(self): """ Takes no initial input. """ self._selector = None return # End of file uqfoundation-pathos-33e3f91/pathos/tests/000077500000000000000000000000001467657623600205475ustar00rootroot00000000000000uqfoundation-pathos-33e3f91/pathos/tests/__init__.py000066400000000000000000000007451467657623600226660ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2018-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ to run this test suite, first build and install `pathos`. $ python -m pip install ../.. then run the tests with: $ python -m pathos.tests or, if `nose` is installed: $ nosetests """ uqfoundation-pathos-33e3f91/pathos/tests/__main__.py000066400000000000000000000016051467657623600226430ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2018-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE import glob import os import sys import subprocess as sp python = sys.executable try: import pox python = pox.which_python(version=True) or python except ImportError: pass shell = sys.platform[:3] == 'win' suite = os.path.dirname(__file__) or os.path.curdir tests = glob.glob(suite + os.path.sep + 'test_*.py') if __name__ == '__main__': failed = 0 for test in tests: p = sp.Popen([python, test], shell=shell).wait() if p: print('F', end='', flush=True) failed = 1 else: print('.', end='', flush=True) print('') exit(failed) uqfoundation-pathos-33e3f91/pathos/tests/test_decorate.py000066400000000000000000000035511467657623600237520ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ demonstrates pickle/source failure cases with decorators/factories and pp """ def __wrap_nested(function, inner_function): def function_wrapper(x): _x = x[:] return function(inner_function(_x)) return function_wrapper def wrap_nested(inner_function): def dec(function): def function_wrapper(x): _x = x[:] return function(inner_function(_x)) return function_wrapper return dec class _wrap_nested(object): def __init__(self, inner_function): self._inner_function = inner_function def __call__(self, function): def function_wrapper(x): _x = x[:] return function(self._inner_function(_x)) return function_wrapper def add(*args): #from numpy import sum return sum(*args) ''' # FAILS to find 'add' (returns [None,None]) @wrap_nested(add) def addabs(x): return abs(x) ''' #addabs = __wrap_nested(abs, add) # ok wrapadd = wrap_nested(add) # ok #wrapadd = _wrap_nested(add) # HANGS addabs = wrapadd(abs) # #''' def test_wrap(): x = [(-1,-2),(3,-4)] y = [3, 1] assert list(map(addabs, x)) == y from pathos.pools import ProcessPool as Pool assert Pool().map(addabs, x) == y from pathos.pools import ParallelPool as Pool assert Pool().map(addabs, x) == y if __name__ == '__main__': from pathos.helpers import freeze_support, shutdown freeze_support() test_wrap() shutdown() uqfoundation-pathos-33e3f91/pathos/tests/test_join.py000066400000000000000000000156241467657623600231270ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2015-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
# pathos/tests/test_join.py -- exercise the close/join/restart/clear pool
# lifecycle and the shared pool-state registry (pool.__state__)
from pathos.parallel import *
from pathos.multiprocessing import *
from pathos.threading import *
from pathos.helpers import cpu_count
import sys
import dill
PYPY38 = (sys.hexversion >= 0x30800f0) and dill._dill.IS_PYPY
# both lifecycle errors surface as ValueError in pathos pools
PoolClosedError = ValueError
PoolRunningError = ValueError

def squared(x):
    return x**2

def check_basic(pool):
    """walk a pool through map/close/join/restart/clear transitions"""
    state = pool.__state__
    res = pool.map(squared, range(2))
    assert res == [0, 1]
    res = pool.map(squared, range(2))
    assert res == [0, 1]
    # join needs to be called after close
    try:
        pool.join()
    except PoolRunningError:
        pass
    else:
        raise AssertionError
    pool.close()
    # map fails when closed
    try:
        pool.map(squared, range(2))
    except PoolClosedError:
        pass
    else:
        raise AssertionError
    obj = pool._serve()
    assert obj in list(state.values())
    # serve has no effect on closed
    try:
        pool.map(squared, range(2))
    except PoolClosedError:
        pass
    else:
        raise AssertionError
    # once restarted, map works
    pool.restart()
    res = pool.map(squared, range(2))
    assert res == [0, 1]
    # assorted kicking of the tires...
    pool.close()
    pool.restart()
    res = pool.map(squared, range(2))
    assert res == [0, 1]
    pool.close()
    pool.join()
    pool._serve()
    try:
        pool.map(squared, range(2))
    except PoolClosedError:
        pass
    else:
        raise AssertionError
    pool.join()
    try:
        pool.map(squared, range(2))
    except PoolClosedError:
        pass
    else:
        raise AssertionError
    pool.clear()
    res = pool.map(squared, range(2))
    assert res == [0, 1]
    try:
        pool.join()
    except PoolRunningError:
        pass
    else:
        raise AssertionError
    pool.close()
    pool.join()
    try:
        pool.map(squared, range(2))
    except PoolClosedError:
        pass
    else:
        raise AssertionError
    pool.restart()
    res = pool.map(squared, range(2))
    assert res == [0, 1]
    pool.close()
    pool.join()
    try:
        pool.map(squared, range(2))
    except PoolClosedError:
        pass
    else:
        raise AssertionError
    pool._serve()
    try:
        pool.map(squared, range(2))
    except PoolClosedError:
        pass
    else:
        raise AssertionError
    pool.restart()
    res = pool.map(squared, range(2))
    assert res == [0, 1]
    pool.close()
    try:
        pool.map(squared, range(2))
    except PoolClosedError:
        pass
    else:
        raise AssertionError
    pool.restart()
    res = pool.map(squared, range(2))
    assert res == [0, 1]
    obj = pool._serve()
    assert obj in list(state.values())
    assert len(state) == 1
    pool.terminate()
    pool.clear()
    assert len(state) == 0
    return

def check_nodes(pool):
    """verify that changing 'nodes' creates/reuses pools as expected"""
    state = pool.__state__
    tag = 'fixed' if pool._id == 'fixed' else None
    new_pool = type(pool)
    nodes = cpu_count()
    if nodes < 2:
        return  # cannot halve a single-node pool
    half = nodes//2
    res = pool.map(squared, range(2))
    assert res == [0, 1]
    pool.close()
    # doesn't create a new pool... IS IT BETTER IF IT DOES?
    pool = new_pool(id=tag)
    try:
        pool.map(squared, range(2))
    except PoolClosedError:
        pass
    else:
        raise AssertionError
    # creates a new pool (nodes are different)
    def nnodes(pool):
        # peek at the private (name-mangled) node count
        return getattr(pool, '_'+new_pool.__name__+'__nodes')
    old_nodes = nnodes(pool)
    pool = new_pool(nodes=half, id=tag)
    new_nodes = nnodes(pool)
    if isinstance(pool, ParallelPool):
        print('SKIPPING: new_pool check for ParallelPool')#FIXME
    else:
        res = pool.map(squared, range(2))
        assert res == [0, 1]
        assert new_nodes < old_nodes
        pool.close()
        try:
            pool.map(squared, range(2))
        except PoolClosedError:
            pass
        else:
            raise AssertionError
    # return to old number of nodes
    if tag is None:
        pool.clear() # clear 'half' pool
        pool = new_pool(id=tag)
        pool.restart() # restart old pool
    else: # creates a new pool (update nodes)
        pool = new_pool(id=tag)
    if isinstance(pool, ParallelPool):
        print('SKIPPING: new_pool check for ParallelPool')#FIXME
    else:
        res = pool.map(squared, range(2))
        assert res == [0, 1]
        pool.close()
    # doesn't create a new pool... IS IT BETTER IF IT DOES?
    pool = new_pool(id=tag)
    try:
        pool.map(squared, range(2))
    except PoolClosedError:
        pass
    else:
        raise AssertionError
    assert len(state) == 1
    pool.clear()
    assert len(state) == 0
    pool = new_pool(id=tag)
    res = pool.map(squared, range(2))
    assert res == [0, 1]
    assert len(state) == 1
    pool.terminate()
    assert len(state) == 1
    pool.clear()
    assert len(state) == 0
    return

def check_rename(pool):
    """verify that changing pool._id registers a second pool in __state__"""
    state = pool.__state__
    new_pool = type(pool)
    res = pool.map(squared, range(2))
    assert res == [0, 1]
    old_id = pool._id
    # change the 'id'
    pool._id = 'foobar'
    pool = new_pool() # blow away the 'id' change
    res = pool.map(squared, range(2))
    assert res == [0, 1]
    assert len(state) == 1
    assert 'foobar' not in list(state.keys())
    # change the 'id', but don't re-init
    pool._id = 'foobar'
    res = pool.map(squared, range(2))
    assert res == [0, 1]
    assert len(state) == 2
    assert 'foobar' in list(state.keys())
    pool.close()
    try:
        pool.map(squared, range(2))
    except PoolClosedError:
        pass
    else:
        raise AssertionError
    pool.terminate()
    assert len(state) == 2
    assert 'foobar' in list(state.keys())
    pool.clear()
    assert len(state) == 1
    assert 'foobar' not in list(state.keys())
    pool._id = old_id
    res = pool.map(squared, range(2))
    assert res == [0, 1]
    pool.terminate()
    pool.clear()
    assert len(state) == 0
    return

def test_basic():
    check_basic(ThreadPool())
#   check_basic(ProcessPool())
#   check_basic(ParallelPool())

def test_rename():
    check_rename(ThreadPool())
    check_rename(ProcessPool())
    check_rename(ParallelPool())

def test_fixed():
    check_nodes(ThreadPool(id='fixed'))
    check_nodes(ProcessPool(id='fixed'))
    check_nodes(ParallelPool(id='fixed'))

def test_nodes():
    check_nodes(ThreadPool())
    if not PYPY38: #FIXME: fails with "OSError: [Errno 24] Too many open files"
        check_nodes(ProcessPool())
    check_nodes(ParallelPool())

if __name__ == '__main__':
    from pathos.helpers import freeze_support, shutdown
    freeze_support()
    test_basic()
    test_rename()
    test_fixed()
    test_nodes()
    shutdown()
# pathos/tests/test_map.py -- compare every pool's map output (and rough
# timing) against the builtin map
import time

verbose = False
delay = 0.01
items = 100

def busy_add(x,y, delay=0.01):
    """a CPU/sleep-burning add, so maps have measurable work to do"""
    import time
    for n in range(x):
        x += n
    for n in range(y):
        y -= n
    time.sleep(delay)
    return x + y

def timed_pool(pool, items=100, delay=0.1, verbose=False):
    """map busy_add over a generated problem set, optionally printing timings"""
    _x = range(-items//2,items//2,2)
    _y = range(len(_x))
    _d = [delay]*len(_x)
    if verbose:
        print(pool)
    start = time.time()
    res = pool.map(busy_add, _x, _y, _d)
    _t = time.time() - start
    if verbose:
        print("time to queue: %s" % _t)
    start = time.time()
    _sol_ = list(res)
    t_ = time.time() - start
    if verbose:
        print("time to results: %s\n" % t_)
    return _sol_

class BuiltinPool(object):
    # reference 'pool' backed by the builtin map
    def map(self, *args):
        return list(map(*args))

# reference solution computed once with the builtin map
std = timed_pool(BuiltinPool(), items, delay=0, verbose=False)

def test_serial():
    from pathos.pools import SerialPool as PS
    pool = PS()
    res = timed_pool(pool, items, delay, verbose)
    assert res == std

def test_pp():
    from pathos.pools import ParallelPool as PPP
    pool = PPP(servers=('localhost:5653','localhost:2414'))
    res = timed_pool(pool, items, delay, verbose)
    assert res == std

def test_processing():
    from pathos.pools import ProcessPool as MPP
    pool = MPP()
    res = timed_pool(pool, items, delay, verbose)
    assert res == std

def test_threading():
    from pathos.pools import ThreadPool as MTP
    pool = MTP()
    res = timed_pool(pool, items, delay, verbose)
    assert res == std

if __name__ == '__main__':
    if verbose:
        print("CONFIG: delay = %s" % delay)
        print("CONFIG: items = %s" % items)
        print("")
    from pathos.helpers import freeze_support, shutdown
    freeze_support()
    test_serial()
    test_pp()
    test_processing()
    test_threading()
    shutdown()


# ---- next archive member: pathos/tests/test_maps.py ----
# module-level smoke tests for the pathos.maps interface objects
from pathos.maps import *
from pathos.pools import _ThreadPool as Pool

def squared(x):
    import time
    time.sleep(.1)
    return x*x

def added(*x):
    import time
    time.sleep(.1)
    return sum(x)

# serial map vs pooled map must agree
s = Map()
p = Map(Pool)
assert s(squared, range(4)) == p(squared, range(4))
del s, p
s = Imap()
p = Imap(Pool)
i = s(squared, range(4))
j = p(squared, range(4))
assert list(i) == list(j)
del s, p, i, j
s = Amap(Pool) #NotImplemented: Amap()
p = Uimap(Pool) #NotImplemented: Uimap()
assert sorted(p(squared, range(4))) == s(squared, range(4)).get()
del s, p
s = Smap()
p = Smap(Pool)
q = Asmap(Pool) #NotImplemented: Asmap()
sequence = [[0,1],[2,3],[4,5],[6,7]]
assert s(added, sequence) == p(added, sequence) == q(added, sequence).get()
del s, p, q
s = Ismap()
p = Ismap(Pool)
i = s(added, sequence)
j = p(added, sequence)
assert list(i) == list(j)
del s, p, i, j
# reusing an Imap, including across close/join
p = Imap(Pool)
i = p(squared, range(4))
j = p(squared, range(4))
assert list(i) == list(j)
i = p(squared, range(4))
p.close()
p.join()
j = p(squared, range(4))
assert list(i) == list(j)
del p, i, j
# maps must survive a dill round-trip
import dill
s = Map(Pool)
p = Amap(Pool)
assert dill.copy(s)(squared, range(4)) == dill.copy(p)(squared, range(4)).get()
del s, p
import os
if not os.environ.get('COVERAGE'): #XXX: travis-ci
    # nested maps: a map used as the mapped function of another map
    from pathos.pools import _ProcessPool as _Pool
    s = Smap(Pool)
    p = Map(_Pool)
    r = s(p, [[squared, range(4)]]*4)
    del s, p
    s = Amap(Pool)
    p = Imap(_Pool)
    t = s(lambda x: list(p(squared, x)), [range(4)]*4)
    assert r == t.get()
    del s, p, r, t
# pathos/tests/test_mp.py -- ProcessPool/ThreadPool map variants, keyword
# propagation, and chunksize parity with the raw multiprocessing pool
import time

def test_mp():
    # instantiate and configure the worker pool
    from pathos.pools import ProcessPool
    pool = ProcessPool(nodes=4)
    _result = list(map(pow, [1,2,3,4], [5,6,7,8]))
    # do a blocking map on the chosen function
    result = pool.map(pow, [1,2,3,4], [5,6,7,8])
    assert result == _result
    # do a non-blocking map, then extract the result from the iterator
    result_iter = pool.imap(pow, [1,2,3,4], [5,6,7,8])
    result = list(result_iter)
    assert result == _result
    # do an asynchronous map, then get the results
    result_queue = pool.amap(pow, [1,2,3,4], [5,6,7,8])
    result = result_queue.get()
    assert result == _result
    # test ProcessPool keyword argument propagation: a sleeping initializer
    # must delay the first map by at least its sleep time
    pool.clear()
    pool = ProcessPool(nodes=4, initializer=lambda: time.sleep(0.6))
    start = time.monotonic()
    result = pool.map(pow, [1,2,3,4], [5,6,7,8])
    end = time.monotonic()
    assert result == _result
    assert end - start > 0.5

def test_tp():
    # instantiate and configure the worker pool
    from pathos.pools import ThreadPool
    pool = ThreadPool(nodes=4)
    _result = list(map(pow, [1,2,3,4], [5,6,7,8]))
    # do a blocking map on the chosen function
    result = pool.map(pow, [1,2,3,4], [5,6,7,8])
    assert result == _result
    # do a non-blocking map, then extract the result from the iterator
    result_iter = pool.imap(pow, [1,2,3,4], [5,6,7,8])
    result = list(result_iter)
    assert result == _result
    # do an asynchronous map, then get the results
    result_queue = pool.amap(pow, [1,2,3,4], [5,6,7,8])
    result = result_queue.get()
    assert result == _result
    # test ThreadPool keyword argument propagation
    pool.clear()
    pool = ThreadPool(nodes=4, initializer=lambda: time.sleep(0.6))
    start = time.monotonic()
    result = pool.map(pow, [1,2,3,4], [5,6,7,8])
    end = time.monotonic()
    assert result == _result
    assert end - start > 0.5

def test_chunksize():
    # instantiate and configure the worker pool
    from pathos.pools import ProcessPool, _ProcessPool, ThreadPool
    from pathos.helpers.mp_helper import starargs as star
    pool = _ProcessPool(4)
    ppool = ProcessPool(4)
    tpool = ThreadPool(4)
    # do a blocking map on the chosen function
    result1 = pool.map(star(pow), zip([1,2,3,4],[5,6,7,8]), 1)
    assert result1 == ppool.map(pow, [1,2,3,4], [5,6,7,8], chunksize=1)
    assert result1 == tpool.map(pow, [1,2,3,4], [5,6,7,8], chunksize=1)
    result0 = pool.map(star(pow), zip([1,2,3,4],[5,6,7,8]), 0)
    assert result0 == ppool.map(pow, [1,2,3,4], [5,6,7,8], chunksize=0)
    assert result0 == tpool.map(pow, [1,2,3,4], [5,6,7,8], chunksize=0)
    # do an asynchronous map, then get the results
    result1 = pool.map_async(star(pow), zip([1,2,3,4],[5,6,7,8]), 1).get()
    assert result1 == ppool.amap(pow, [1,2,3,4], [5,6,7,8], chunksize=1).get()
    assert result1 == tpool.amap(pow, [1,2,3,4], [5,6,7,8], chunksize=1).get()
    result0 = pool.map_async(star(pow), zip([1,2,3,4],[5,6,7,8]), 0).get()
    assert result0 == ppool.amap(pow, [1,2,3,4], [5,6,7,8], chunksize=0).get()
    assert result0 == tpool.amap(pow, [1,2,3,4], [5,6,7,8], chunksize=0).get()
    # do a non-blocking map, then extract the result from the iterator
    result1 = list(pool.imap(star(pow), zip([1,2,3,4],[5,6,7,8]), 1))
    assert result1 == list(ppool.imap(pow, [1,2,3,4], [5,6,7,8], chunksize=1))
    assert result1 == list(tpool.imap(pow, [1,2,3,4], [5,6,7,8], chunksize=1))
    # chunksize=0 must raise; learn the expected error from the raw pool,
    # then require the pathos pools to raise the same type
    try:
        list(pool.imap(star(pow), zip([1,2,3,4],[5,6,7,8]), 0))
        error = AssertionError
    except Exception:
        import sys
        error = sys.exc_info()[0]
    try:
        list(ppool.imap(pow, [1,2,3,4], [5,6,7,8], chunksize=0))
        assert False
    except error:
        pass
    except Exception:
        import sys
        e = sys.exc_info()[1]
        raise AssertionError(str(e))
    try:
        list(tpool.imap(pow, [1,2,3,4], [5,6,7,8], chunksize=0))
        assert False
    except error:
        pass
    except Exception:
        import sys
        e = sys.exc_info()[1]
        raise AssertionError(str(e))
    # do a non-blocking map, then extract the result from the iterator
    res1 = sorted(pool.imap_unordered(star(pow), zip([1,2,3,4],[5,6,7,8]), 1))
    assert res1 == sorted(ppool.uimap(pow, [1,2,3,4], [5,6,7,8], chunksize=1))
    assert res1 == sorted(tpool.uimap(pow, [1,2,3,4], [5,6,7,8], chunksize=1))
    try:
        sorted(pool.imap_unordered(star(pow), zip([1,2,3,4],[5,6,7,8]), 0))
        error = AssertionError
    except Exception:
        import sys
        error = sys.exc_info()[0]
    try:
        sorted(ppool.uimap(pow, [1,2,3,4], [5,6,7,8], chunksize=0))
        assert False
    except error:
        pass
    except Exception:
        import sys
        e = sys.exc_info()[1]
        raise AssertionError(str(e))
    try:
        sorted(tpool.uimap(pow, [1,2,3,4], [5,6,7,8], chunksize=0))
        assert False
    except error:
        pass
    except Exception:
        import sys
        e = sys.exc_info()[1]
        raise AssertionError(str(e))

if __name__ == '__main__':
    from pathos.helpers import freeze_support, shutdown
    freeze_support()
    test_mp()
    test_tp()
    test_chunksize()
    shutdown()
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE from dill import source def run_source(obj): _obj = source._wrap(obj) assert _obj(1.57) == obj(1.57) src = source.getimportable(obj, alias='_f') exec(src, globals()) assert _f(1.57) == obj(1.57) name = source.getname(obj) assert name == obj.__name__ or src.split("=",1)[0].strip() def run_ppmap(obj): from pathos.pools import ParallelPool p = ParallelPool(2) x = [1,2,3] assert list(map(obj, x)) == p.map(obj, x) p.clear() def test_pp(): from math import sin f = lambda x:x+1 def g(x): return x+2 for func in [g, f, abs, sin]: run_source(func) run_ppmap(func) if __name__ == '__main__': test_pp() uqfoundation-pathos-33e3f91/pathos/tests/test_random.py000066400000000000000000000030411467657623600234360ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2023-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE from pathos.pools import * try: import numpy HASNUMPY = True except ImportError: HASNUMPY = False def random(x): from pathos.helpers import mp_helper as mp return mp.random_state('random').random()+x def rand(x): from pathos.helpers import mp_helper as mp return mp.random_state('numpy.random').rand()+x def wrong1(x): import random return random.random()+x def wrong2(x): import numpy return numpy.random.rand()+x def check_random(pool): res = pool.map(random, range(2)) assert res[0] != res[1] if HASNUMPY: res = pool.map(rand, range(2)) assert res[0] != res[1] pool.close() pool.join() pool.clear() return def check_wrong(pool): res = pool.map(wrong1, range(2)) assert res[0] == res[1] if HASNUMPY: res = pool.map(wrong2, range(2)) assert res[0] == res[1] pool.close() pool.join() pool.clear() return def test_random(): check_random(ThreadPool()) check_random(ProcessPool()) check_random(ParallelPool()) def test_wrong(): check_random(ProcessPool()) check_random(ParallelPool()) if __name__ == '__main__': from pathos.helpers import freeze_support, shutdown freeze_support() test_random() shutdown() uqfoundation-pathos-33e3f91/pathos/tests/test_star.py000066400000000000000000000114541467657623600231360ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
# pathos/tests/test_star.py -- sanity, map-variant, dill, and amap-readiness
# checks for each pool type
import time

x = list(range(18))
delay = 0.01
items = 20
maxtries = 20

def busy_add(x,y, delay=0.01):
    """an add with artificial work, so parallel timing is observable"""
    import time
    for n in range(x):
        x += n
    for n in range(y):
        y -= n
    time.sleep(delay)
    return x + y

def busy_squared(x):
    # squared with a random short sleep, to scramble completion order
    import time, random
    time.sleep(0.01*random.random())
    return x*x

def squared(x):
    return x*x

def quad_factory(a=1, b=1, c=0):
    """closure factory: the returned quad cannot be pickled by name"""
    def quad(x):
        return a*x**2 + b*x + c
    return quad

square_plus_one = quad_factory(2,0,1)

# reference solution
x2 = list(map(squared, x))

def check_sanity(pool, verbose=False):
    """map/imap/amap on squared must all reproduce the builtin map"""
    if verbose:
        print(pool)
        print("x: %s\n" % str(x))
        print(pool.map.__name__)
    # blocking map
    start = time.time()
    res = pool.map(squared, x)
    end = time.time() - start
    assert res == x2
    if verbose:
        print("time to results: %s" % end)
        print("y: %s\n" % str(res))
        print(pool.imap.__name__)
    # iterative map
    start = time.time()
    res = pool.imap(squared, x)
    fin = time.time() - start
    # get result from iterator
    start = time.time()
    res = list(res)
    end = time.time() - start
    assert res == x2
    if verbose:
        print("time to queue: %s" % fin)
        print("time to results: %s" % end)
        print("y: %s\n" % str(res))
        print(pool.amap.__name__)
    # asyncronous map
    start = time.time()
    res = pool.amap(squared, x)
    fin = time.time() - start
    # get result from result object
    start = time.time()
    res = res.get()
    end = time.time() - start
    assert res == x2
    if verbose:
        print("time to queue: %s" % fin)
        print("time to results: %s" % end)
        print("y: %s\n" % str(res))

def check_maps(pool, items=4, delay=0):
    """compare every pool map variant against the builtin map"""
    _x = range(-items//2,items//2,2)
    _y = range(len(_x))
    _d = [delay]*len(_x)
    _z = [0]*len(_x)
    #print(map)
    res1 = list(map(squared, _x))
    res2 = list(map(busy_add, _x, _y, _z))
    #print(pool.map)
    _res1 = pool.map(squared, _x)
    _res2 = pool.map(busy_add, _x, _y, _d)
    assert _res1 == res1
    assert _res2 == res2
    #print(pool.imap)
    _res1 = pool.imap(squared, _x)
    _res2 = pool.imap(busy_add, _x, _y, _d)
    assert list(_res1) == res1
    assert list(_res2) == res2
    #print(pool.uimap)
    _res1 = pool.uimap(squared, _x)
    _res2 = pool.uimap(busy_add, _x, _y, _d)
    # unordered results: compare as sorted sequences
    assert sorted(_res1) == sorted(res1)
    assert sorted(_res2) == sorted(res2)
    #print(pool.amap)
    _res1 = pool.amap(squared, _x)
    _res2 = pool.amap(busy_add, _x, _y, _d)
    assert _res1.get() == res1
    assert _res2.get() == res2
    #print("")

def check_dill(pool, verbose=False):
    # test function that should fail in pickle (dill-backed pools handle it)
    if verbose:
        print(pool)
        print("x: %s\n" % str(x))
        print(pool.map.__name__)
    #start = time.time()
    try:
        res = pool.map(square_plus_one, x)
    except:
        assert False # should use a smarter test here...
    #end = time.time() - start
    if verbose:
        # print("time to results: %s" % end)
        print("y: %s\n" % str(res))
    assert True

def check_ready(pool, maxtries, delay, verbose=True):
    """poll an amap result until ready, bounding the number of tries"""
    if verbose:
        print(pool)
    m = pool.amap(busy_squared, x)# x)
    # print(m.ready())
    # print(m.wait(0))
    tries = 0
    while not m.ready():
        time.sleep(delay)
        tries += 1
        if verbose:
            print("TRY: %s" % tries)
        if tries >= maxtries:
            if verbose:
                print("TIMEOUT")
            break
    #print(m.ready())
    # print(m.get(0))
    res = m.get()
    if verbose:
        print(res)
    z = [0]*len(x)
    assert res == list(map(squared, x))# x, z)
    assert tries > 0
    assert maxtries > tries #should be True, may not be if CPU is SLOW

def test_mp():
    from pathos.pools import ProcessPool as Pool
    pool = Pool(nodes=4)
    check_sanity( pool )
    check_maps( pool, items, delay )
    check_dill( pool )
    check_ready( pool, maxtries, delay, verbose=False )

def test_tp():
    from pathos.pools import ThreadPool as Pool
    pool = Pool(nodes=4)
    check_sanity( pool )
    check_maps( pool, items, delay )
    check_dill( pool )
    check_ready( pool, maxtries, delay, verbose=False )

def test_pp():
    from pathos.pools import ParallelPool as Pool
    pool = Pool(nodes=4)
    check_sanity( pool )
    check_maps( pool, items, delay )
    check_dill( pool )
    check_ready( pool, maxtries, delay, verbose=False )

if __name__ == '__main__':
    from pathos.helpers import freeze_support, shutdown
    freeze_support()
    test_mp()
    test_tp()
    test_pp()
    shutdown()
uqfoundation-pathos-33e3f91/pathos/tests/test_with.py000066400000000000000000000033321467657623600231340ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE PRIMES = [ 112272535095293, 112582705942171, 112272535095293, 115280095190773, 115797848077099, 1099726899285419] def is_prime(n): if n % 2 == 0: return False import math sqrt_n = int(math.floor(math.sqrt(n))) for i in range(3, sqrt_n + 1, 2): if n % i == 0: return False return True def sleep_add1(x): from time import sleep if x < 4: sleep(x/10.0) return x+1 def sleep_add2(x): from time import sleep if x < 4: sleep(x/10.0) return x+2 def run_with_multipool(Pool): inputs = range(10) with Pool() as pool1: res1 = pool1.amap(sleep_add1, inputs) with Pool() as pool2: res2 = pool2.amap(sleep_add2, inputs) with Pool() as pool3: for number, prime in zip(PRIMES, pool3.imap(is_prime, PRIMES)): assert prime if number != PRIMES[-1] else not prime #print ('%d is prime: %s' % (number, prime)) assert res1.get() == [i+1 for i in inputs] assert res2.get() == [i+2 for i in inputs] def test_with_pp(): from pathos.pools import ParallelPool run_with_multipool(ParallelPool) def test_with_mp(): from pathos.pools import ProcessPool run_with_multipool(ProcessPool) if __name__ == '__main__': from pathos.helpers import freeze_support, shutdown freeze_support() test_with_mp() test_with_pp() shutdown() uqfoundation-pathos-33e3f91/pathos/threading.py000066400000000000000000000214671467657623600217360ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. 
# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ This module contains map and pipe interfaces to python's threading module. Pipe methods provided: pipe - blocking communication pipe [returns: value] apipe - asynchronous communication pipe [returns: object] Map methods provided: map - blocking and ordered worker pool [returns: list] imap - non-blocking and ordered worker pool [returns: iterator] uimap - non-blocking and unordered worker pool [returns: iterator] amap - asynchronous worker pool [returns: object] Usage ===== A typical call to a pathos threading map will roughly follow this example: >>> # instantiate and configure the worker pool >>> from pathos.threading import ThreadPool >>> pool = ThreadPool(nodes=4) >>> >>> # do a blocking map on the chosen function >>> print(pool.map(pow, [1,2,3,4], [5,6,7,8])) >>> >>> # do a non-blocking map, then extract the results from the iterator >>> results = pool.imap(pow, [1,2,3,4], [5,6,7,8]) >>> print("...") >>> print(list(results)) >>> >>> # do an asynchronous map, then get the results >>> results = pool.amap(pow, [1,2,3,4], [5,6,7,8]) >>> while not results.ready(): ... time.sleep(5); print(".", end=' ') ... >>> print(results.get()) >>> >>> # do one item at a time, using a pipe >>> print(pool.pipe(pow, 1, 5)) >>> print(pool.pipe(pow, 2, 6)) >>> >>> # do one item at a time, using an asynchronous pipe >>> result1 = pool.apipe(pow, 1, 5) >>> result2 = pool.apipe(pow, 2, 6) >>> print(result1.get()) >>> print(result2.get()) Notes ===== This worker pool leverages the python's multiprocessing.dummy module, and thus has many of the limitations associated with that module. The function f and the sequences in args must be serializable. 
The maps in this worker pool have full functionality whether run from a script or in the python interpreter, and work reliably for both imported and interactively-defined functions. Unlike python's multiprocessing.dummy module, pathos.threading maps can directly utilize functions that require multiple arguments. """ __all__ = ['ThreadPool','_ThreadPool'] #FIXME: probably not good enough... should store each instance with a uid __STATE = _ThreadPool__STATE = {} from pathos.abstract_launcher import AbstractWorkerPool from pathos.helpers.mp_helper import starargs as star from pathos.helpers import cpu_count, ThreadPool as _ThreadPool class ThreadPool(AbstractWorkerPool): """ Mapper that leverages python's threading. """ def __init__(self, *args, **kwds): """\nNOTE: if number of nodes is not given, will autodetect processors. \nNOTE: additional keyword input is optional, with: id - identifier for the pool initializer - function that takes no input, called when node is spawned initargs - tuple of args for initializers that have args """ hasnodes = 'nodes' in kwds; arglen = len(args) if 'nthreads' in kwds and (hasnodes or arglen): msg = "got multiple values for keyword argument 'nthreads'" raise TypeError(msg) elif hasnodes: #XXX: multiple try/except is faster? 
if arglen: msg = "got multiple values for keyword argument 'nodes'" raise TypeError(msg) kwds['nthreads'] = kwds.pop('nodes') elif arglen: kwds['nthreads'] = args[0] if 'processes' in kwds: if 'nthreads' in kwds: msg = "got multiple values for keyword argument 'processes'" raise TypeError(msg) kwds['nthreads'] = kwds.pop('processes') self.__nodes = kwds.pop('nthreads', cpu_count()) # Create an identifier for the pool self._id = kwds.pop('id', None) #'threads' if self._id is None: self._id = self.__nodes self._kwds = kwds # Create a new server if one isn't already initialized self._serve() return if AbstractWorkerPool.__init__.__doc__: __init__.__doc__ = AbstractWorkerPool.__init__.__doc__ + __init__.__doc__ #def __exit__(self, *args): # self._clear() # return def _serve(self, nodes=None): #XXX: should be STATE method; use id """Create a new server if one isn't already initialized""" if nodes is None: nodes = self.__nodes _pool = __STATE.get(self._id, None) if not _pool or nodes != _pool.__nodes or self._kwds != _pool._kwds: self._clear() _pool = _ThreadPool(nodes, **self._kwds) _pool.__nodes = nodes _pool._kwds = self._kwds __STATE[self._id] = _pool return _pool def _clear(self): #XXX: should be STATE method; use id """Remove server with matching state""" _pool = __STATE.get(self._id, None) if _pool and self.__nodes == _pool.__nodes and self._kwds == _pool._kwds: _pool.close() _pool.join() __STATE.pop(self._id, None) return #XXX: return _pool? 
clear = _clear def map(self, f, *args, **kwds): AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds) _pool = self._serve() return _pool.map(star(f), zip(*args), **kwds) map.__doc__ = AbstractWorkerPool.map.__doc__ def imap(self, f, *args, **kwds): AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds) _pool = self._serve() return _pool.imap(star(f), zip(*args), **kwds) imap.__doc__ = AbstractWorkerPool.imap.__doc__ def uimap(self, f, *args, **kwds): AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds) _pool = self._serve() return _pool.imap_unordered(star(f), zip(*args), **kwds) uimap.__doc__ = AbstractWorkerPool.uimap.__doc__ def amap(self, f, *args, **kwds): # register a callback ? AbstractWorkerPool._AbstractWorkerPool__map(self, f, *args, **kwds) _pool = self._serve() return _pool.map_async(star(f), zip(*args), **kwds) amap.__doc__ = AbstractWorkerPool.amap.__doc__ ######################################################################## # PIPES def pipe(self, f, *args, **kwds): #AbstractWorkerPool._AbstractWorkerPool__pipe(self, f, *args, **kwds) _pool = self._serve() return _pool.apply(f, args, kwds) #pipe.__doc__ = AbstractWorkerPool.pipe.__doc__ def apipe(self, f, *args, **kwds): # register a callback ? 
#AbstractWorkerPool._AbstractWorkerPool__apipe(self, f, *args, **kwds) _pool = self._serve() return _pool.apply_async(f, args, kwds) #apipe.__doc__ = AbstractWorkerPool.apipe.__doc__ ######################################################################## def __repr__(self): mapargs = (self.__class__.__name__, self.nthreads) return "" % mapargs def __get_nodes(self): """get the number of nodes used in the map""" return self.__nodes def __set_nodes(self, nodes): """set the number of nodes used in the map""" self._serve(nodes) self.__nodes = nodes return ######################################################################## def restart(self, force=False): "restart a closed pool" _pool = __STATE.get(self._id, None) if _pool and self.__nodes == _pool.__nodes and self._kwds == _pool._kwds: RUN = 0 if not force: assert _pool._state != RUN # essentially, 'clear' and 'serve' self._clear() _pool = _ThreadPool(self.__nodes, **self._kwds) _pool.__nodes = self.__nodes _pool._kwds = self._kwds __STATE[self._id] = _pool return _pool def close(self): "close the pool to any new jobs" _pool = __STATE.get(self._id, None) if _pool and self.__nodes == _pool.__nodes: _pool.close() return def terminate(self): "a more abrupt close" _pool = __STATE.get(self._id, None) if _pool and self.__nodes == _pool.__nodes: _pool.terminate() return def join(self): "cleanup the closed worker processes" _pool = __STATE.get(self._id, None) if _pool and self.__nodes == _pool.__nodes: _pool.join() return # interface nthreads = property(__get_nodes, __set_nodes) nodes = property(__get_nodes, __set_nodes) __state__ = __STATE pass # EOF uqfoundation-pathos-33e3f91/pathos/util.py000066400000000000000000000045011467657623600207340ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE # # adapted from J. Kim & M. McKerns utility functions """ utilities for distributed computing """ import os def _str(byte, codec=None): """convert bytes to string using the given codec (default is 'ascii')""" if codec is False or not hasattr(byte, 'decode'): return byte return byte.decode(codec or 'ascii') def _b(string, codec=None): """convert string to bytes using the given codec (default is 'ascii')""" if codec is False or not hasattr(string, 'encode'): return string return string.encode(codec or 'ascii') def print_exc_info(): """thread-safe return of string from print_exception call""" import traceback import io sio = io.StringIO() traceback.print_exc(file=sio) #thread-safe print_exception to string sio.seek(0, 0) return sio.read() def spawn(onParent, onChild): """a unidirectional fork wrapper Calls onParent(pid, fromchild) in parent process, onChild(pid, toparent) in child process. """ c2pread, c2pwrite = os.pipe() pid = os.fork() if pid > 0: os.close(c2pwrite) fromchild = os.fdopen(c2pread, 'rb') return onParent(pid, fromchild) os.close(c2pread) toparent = os.fdopen(c2pwrite, 'wb', 0) pid = os.getpid() return onChild(pid, toparent) def spawn2(onParent, onChild): """a bidirectional fork wrapper Calls onParent(pid, fromchild, tochild) in parent process, onChild(pid, fromparent, toparent) in child process. 
""" p2cread, p2cwrite = os.pipe() c2pread, c2pwrite = os.pipe() pid = os.fork() if pid > 0: os.close(p2cread) os.close(c2pwrite) fromchild = os.fdopen(c2pread, 'rb') tochild = os.fdopen(p2cwrite, 'wb', 0) return onParent(pid, fromchild, tochild) os.close(p2cwrite) os.close(c2pread) fromparent = os.fdopen(p2cread, 'rb') toparent = os.fdopen(c2pwrite, 'wb', 0) pid = os.getpid() return onChild(pid, fromparent, toparent) # End of file uqfoundation-pathos-33e3f91/pathos/xmlrpc/000077500000000000000000000000001467657623600207125ustar00rootroot00000000000000uqfoundation-pathos-33e3f91/pathos/xmlrpc/__init__.py000066400000000000000000000006211467657623600230220ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE from .server import XMLRPCServer, XMLRPCRequestHandler uqfoundation-pathos-33e3f91/pathos/xmlrpc/server.py000066400000000000000000000205431467657623600225760ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE # # adapted from J. Kim's XMLRPC server and request handler classes """ This module contains the base class for pathos XML-RPC servers, and derives from python's SimpleXMLRPCServer, and the base class for XML-RPC request handlers, which derives from python's base HTTP request handler. 
Usage ===== A typical setup for an XML-RPC server will roughly follow this example: >>> # establish a XML-RPC server on a host at a given port >>> host = 'localhost' >>> port = 1234 >>> server = XMLRPCServer(host, port) >>> print('port=%d' % server.port) >>> >>> # register a method the server can handle requests for >>> def add(x, y): ... return x + y >>> server.register_function(add) >>> >>> # activate the callback methods and begin serving requests >>> server.activate() >>> server.serve() The following is an example of how to make requests to the above server: >>> # establish a proxy connection to the server at (host,port) >>> host = 'localhost' >>> port = 1234 >>> proxy = xmlrpclib.ServerProxy('http://%s:%d' % (host, port)) >>> print('1 + 2 = %s' % proxy.add(1, 2)) >>> print('3 + 4 = %s' % proxy.add(3, 4)) """ __all__ = ['XMLRPCServer','XMLRPCRequestHandler'] import os import socket import xmlrpc.client as client from http.server import BaseHTTPRequestHandler from xmlrpc.server import SimpleXMLRPCDispatcher from pathos.server import Server #XXX: pythia-0.6, was pyre.ipc.Server from pathos.util import print_exc_info, spawn2, _str, _b from pathos import logger class XMLRPCServer(Server, SimpleXMLRPCDispatcher): '''extends base pathos server to an XML-RPC dispatcher''' def activate(self): """install callbacks""" Server.activate(self) self._selector.notifyOnReadReady(self._socket, self._onConnection) self._selector.notifyWhenIdle(self._onSelectorIdle) def serve(self): """enter the select loop... 
and wait for service requests""" timeout = 5 Server.serve(self, 5) def _marshaled_dispatch(self, data, dispatch_method=None): """override SimpleXMLRPCDispatcher._marshaled_dispatch() fault string""" import xmlrpc.client as client from xmlrpc.client import Fault params, method = client.loads(data) # generate response try: if dispatch_method is not None: response = dispatch_method(method, params) else: response = self._dispatch(method, params) # wrap response in a singleton tuple response = (response,) response = client.dumps(response, methodresponse=1) except Fault as fault: # breaks 2.5 compatibility fault.faultString = print_exc_info() response = client.dumps(fault) except: # report exception back to server response = client.dumps( client.Fault(1, "\n%s" % print_exc_info()) ) return _b(response) def _registerChild(self, pid, fromchild): """register a child process so it can be retrieved on select events""" self._activeProcesses[fromchild] = pid self._selector.notifyOnReadReady(fromchild, self._handleMessageFromChild) def _unRegisterChild(self, fd): """remove a child process from active process register""" del self._activeProcesses[fd] def _handleMessageFromChild(self, selector, fd): """handler for message from a child process""" line = _str(fd.readline()) if line[:4] == 'done': pid = self._activeProcesses[fd] os.waitpid(pid, 0) self._unRegisterChild(fd) def _onSelectorIdle(self, selector): '''something to do when there's no requests''' return True def _installSocket(self, host, port): """prepare a listening socket""" from pathos.portpicker import portnumber s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) if port == 0: #Get a random port pick = portnumber(min=port, max=64*1024) while True: try: port = pick() s.bind((host, port)) break except socket.error: continue else: #Designated port s.bind((host, port)) s.listen(10) self._socket = s self.host = host self.port = port return def _onConnection(self, selector, fd): '''upon socket connection, establish a 
request handler''' if isinstance(fd, socket.SocketType): return self._onSocketConnection(fd) return None def _onSocketConnection(self, socket): '''upon socket connections, establish a request handler''' conn, addr = socket.accept() handler = XMLRPCRequestHandler(server=self, socket=conn) handler.handle() return True def __init__(self, host, port): '''create a XML-RPC server Takes two initial inputs: host -- hostname of XML-RPC server host port -- port number for server requests ''' Server.__init__(self) SimpleXMLRPCDispatcher.__init__(self,allow_none=False,encoding=None) self._installSocket(host, port) self._activeProcesses = {} #{ fd : pid } class XMLRPCRequestHandler(BaseHTTPRequestHandler): ''' create a XML-RPC request handler ''' _debug = logger(name="pathos.xmlrpc", level=30) # logging.WARN def do_POST(self): """ Access point from HTTP handler """ def onParent(pid, fromchild, tochild): self._server._registerChild(pid, fromchild) tochild.write(_b('done\n')) tochild.flush() def onChild(pid, fromparent, toparent): try: response = self._server._marshaled_dispatch(data) self._sendResponse(response) line = _str(fromparent.readline()) toparent.write(_b('done\n')) toparent.flush() except: logger(name='pathos.xmlrpc', level=30).error(print_exc_info()) os._exit(0) try: data = self.rfile.read(int(self.headers['content-length'])) params, method = client.loads(data) if method == 'run': #XXX: _str? 
return spawn2(onParent, onChild) else: response = self._server._marshaled_dispatch(data) self._sendResponse(response) return except: self._debug.error(print_exc_info()) self.send_response(500) self.end_headers() return def log_message(self, format, *args): """ Overriding BaseHTTPRequestHandler.log_message() """ self._debug.info("%s - - [%s] %s\n" % (self.address_string(), self.log_date_time_string(), format%args)) def _sendResponse(self, response): """ Write the XML-RPC response """ response = _b(response) self.send_response(200) self.send_header("Content-type", "text/xml") self.send_header("Content-length", str(len(response))) self.end_headers() self.wfile.write(response) self.wfile.flush() self.connection.shutdown(1) def __init__(self, server, socket): """ Override BaseHTTPRequestHandler.__init__(): we need to be able to have (potentially) multiple handler objects at a given time. Inputs: server -- server object to handle requests for socket -- socket connection """ ## Settings required by BaseHTTPRequestHandler self.rfile = socket.makefile('rb', -1) self.wfile = socket.makefile('wb', 0) self.connection = socket self.client_address = (server.host, server.port) self._server = server if __name__ == '__main__': pass # End of file uqfoundation-pathos-33e3f91/pyproject.toml000066400000000000000000000002461467657623600210250ustar00rootroot00000000000000[build-system] # Further build requirements come from setup.py via the PEP 517 interface requires = [ "setuptools>=42", ] build-backend = "setuptools.build_meta" uqfoundation-pathos-33e3f91/scripts/000077500000000000000000000000001467657623600175765ustar00rootroot00000000000000uqfoundation-pathos-33e3f91/scripts/pathos_connect000066400000000000000000000135121467657623600225320ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. 
# License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE """ connect to the specified machine and start a 'server', 'tunnel', or both Notes: Usage: pathos_connect [hostname] [server] [remoteport] [profile] [hostname] - name of the host to connect to [server] - name of RPC server (assumes is installed on host) or 'tunnel' [remoteport] - remote port to use for communication or 'tunnel' [profile] -- name of shell profile to source on remote environment Examples:: $ pathos_connect computer.college.edu ppserver tunnel Usage: pathos_connect [hostname] [server] [remoteport] [profile] [hostname] - name of the host to connect to [server] - name of RPC server (assumes is installed on host) or 'tunnel' [remoteport] - remote port to use for communication or 'tunnel' [profile] -- name of shell profile to source on remote environment defaults are: "localhost" "tunnel" "" "" executing {ssh -N -L 22921:computer.college.edu:15058}' Server running at port=15058 with pid=4110 Connected to localhost at port=22921 Press to kill server """ ## tunnel: pathos_connect college.edu tunnel ## server: pathos_connect college.edu ppserver 12345 .profile ## both: pathos_connect college.edu ppserver tunnel .profile from pathos.core import * from pathos.hosts import get_profile, register_profiles if __name__ == '__main__': ##### CONFIGURATION & INPUT ######################## # set the default remote host rhost = 'localhost' #rhost = 'foobar.internet.org' #rhost = 'computer.college.edu' # set any 'special' profiles (those which don't use default_profie) profiles = {} #profiles = {'foobar.internet.org':'.profile', # 'computer.college.edu':'.cshrc'} # set the default port rport = '' _rport = '98909' # set the default server command server = 'tunnel' #server = 'ppserver' #XXX: "ppserver -p %s" % rport #server = 'classic_server' #XXX: "classic_server -p %s" % rport #server = 'registry_server' #XXX: "registry_server -p %s" % rport 
print("""Usage: pathos_connect [hostname] [remoteport] [server] [profile] Usage: pathos_connect [hostname] [server] [remoteport] [profile] [hostname] - name of the host to connect to [server] - name of RPC server (assumes is installed on host) or 'tunnel' [remoteport] - remote port to use for communication or 'tunnel' [profile] -- name of shell profile to source on remote environment defaults are: "%s" "%s" "%s" "%s".""" % (rhost, server, rport, '')) # get remote hostname from user import sys if '--help' in sys.argv: sys.exit(0) try: myinp = sys.argv[1] except: myinp = None if myinp: rhost = myinp #XXX: should test rhost validity here... (how ?) else: pass # use default del myinp # get server to run from user try: myinp = sys.argv[2] except: myinp = None if myinp: server = myinp #XXX: should test validity here... (filename) else: pass # use default del myinp # set the default 'port' if server == 'tunnel': tunnel = True server = None else: tunnel = False rport = rport if tunnel else _rport # get remote port to run server on from user try: myinp = sys.argv[3] except: myinp = None if myinp: if tunnel: # tunnel doesn't take more inputs msg = "port '%s' not valid for 'tunnel'" % myinp raise ValueError(msg) rport = myinp #XXX: should test validity here... (filename) else: pass # use default del myinp # is it a tunneled server? tunnel = True if (tunnel or rport == 'tunnel') else False rport = '' if rport == 'tunnel' else rport # get remote profile (this should go away soon) try: myinp = sys.argv[4] except: myinp = None if myinp: rprof = myinp #XXX: should test validity here... 
(filename) profiles = {rhost:rprof} else: pass # use default del myinp # my remote environment (should be auto-detected) register_profiles(profiles) profile = get_profile(rhost) ##### CONFIGURATION & INPUT ######################## ## tunnel: pathos_connect foo.college.edu tunnel ## server: pathos_connect foo.college.edu ppserver 12345 .profile ## both: pathos_connect foo.college.edu ppserver tunnel .profile if tunnel: # establish ssh tunnel tunnel = connect(rhost) lport = tunnel._lport rport = tunnel._rport print('executing {ssh -N -L %d:%s:%d}' % (lport, rhost, rport)) else: lport = '' if server: # run server rserver = serve(server, rhost, rport, profile=profile) response = rserver.response() if response: if tunnel: tunnel.disconnect() print(response) raise OSError('Failure to start server') # get server pid #FIXME: launcher.pid is not pid(server) target = '[P,p]ython[^#]*'+server # filter w/ regex for python-based server try: pid = getpid(target, rhost) except OSError: print("Cleanup on host may be required...") if tunnel: tunnel.disconnect() raise # test server # XXX: add a simple one-liner... print("\nServer running at port=%s with pid=%s" % (rport, pid)) if tunnel: print("Connected to localhost at port=%s" % (lport)) print('Press to kill server') else: print('Press to disconnect') sys.stdin.readline() if server: # stop server print(kill(pid,rhost)) # del rserver #XXX: delete should run self.kill (?) if tunnel: # disconnect tunnel tunnel.disconnect() # FIXME: just kills 'ssh', not the tunnel # get local pid: ps u | grep "ssh -N -L%s:%s$s" % (lport,rhost,rport) # kill -15 int(tunnelpid) # EOF uqfoundation-pathos-33e3f91/scripts/portpicker000077500000000000000000000006531467657623600217120ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2018-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE from pathos.portpicker import portnumber, __doc__ if __name__ == '__main__': pick = portnumber(min=1024,max=65535) print( pick() ) uqfoundation-pathos-33e3f91/setup.cfg000066400000000000000000000001761467657623600177340ustar00rootroot00000000000000[egg_info] #tag_build = .dev0 [bdist_wheel] #python-tag = py3 #plat-name = manylinux_2_28_x86_64 [sdist] #formats=zip,gztar uqfoundation-pathos-33e3f91/setup.py000066400000000000000000000113411467657623600176210ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE import os import sys # drop support for older python if sys.version_info < (3, 8): unsupported = 'Versions of Python before 3.8 are not supported' raise ValueError(unsupported) # get distribution meta info here = os.path.abspath(os.path.dirname(__file__)) sys.path.append(here) from version import (__version__, __author__, __contact__ as AUTHOR_EMAIL, get_license_text, get_readme_as_rst, write_info_file) LICENSE = get_license_text(os.path.join(here, 'LICENSE')) README = get_readme_as_rst(os.path.join(here, 'README.md')) # write meta info file write_info_file(here, 'pathos', doc=README, license=LICENSE, version=__version__, author=__author__) del here, get_license_text, get_readme_as_rst, write_info_file # check if setuptools is available try: from setuptools import setup from setuptools.dist import Distribution has_setuptools = True except ImportError: from distutils.core import setup Distribution = object has_setuptools = False # build the 'setup' call setup_kwds = dict( name="pathos", version=__version__, description="parallel graph management and 
execution in heterogeneous computing", long_description = README.strip(), author = __author__, author_email = AUTHOR_EMAIL, maintainer = __author__, maintainer_email = AUTHOR_EMAIL, license = 'BSD-3-Clause', platforms = ['Linux', 'Windows', 'Mac'], url = 'https://github.com/uqfoundation/pathos', download_url = 'https://pypi.org/project/pathos/#files', project_urls = { 'Documentation':'http://pathos.rtfd.io', 'Source Code':'https://github.com/uqfoundation/pathos', 'Bug Tracker':'https://github.com/uqfoundation/pathos/issues', }, python_requires = '>=3.8', classifiers = [ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', 'Programming Language :: Python :: 3.13', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Scientific/Engineering', 'Topic :: Software Development', ], packages=['pathos','pathos.tests',\ 'pathos.helpers','pathos.secure','pathos.xmlrpc'], package_dir={'pathos':'pathos', 'pathos.tests':'pathos/tests', \ 'pathos.helpers':'pathos/helpers', \ 'pathos.secure':'pathos/secure', \ 'pathos.xmlrpc':'pathos/xmlrpc', \ }, scripts=['scripts/pathos_connect', 'scripts/portpicker'], ) # force python-, abi-, and platform-specific naming of bdist_wheel class BinaryDistribution(Distribution): """Distribution which forces a binary package with platform name""" def has_ext_modules(foo): return True # define dependencies ppft_version = 'ppft>=1.7.6.9' dill_version = 'dill>=0.3.9' pox_version = 'pox>=0.3.5' mp_version = 'multiprocess>=0.70.17' pyina_version = 'pyina>=0.2.9' mystic_version = 'mystic>=0.4.2' # add dependencies depend = 
[ppft_version, dill_version, pox_version, mp_version] extras = {'examples': [mystic_version, pyina_version]} # update setup kwds if has_setuptools: setup_kwds.update( zip_safe=False, # distclass=BinaryDistribution, install_requires=depend, # extras_require=extras, ) # call setup setup(**setup_kwds) # if dependencies are missing, print a warning try: import ppft import dill import pox import multiprocess #import mystic #import pyina except ImportError: print("\n***********************************************************") print("WARNING: One of the following dependencies is unresolved:") print(" %s" % ppft_version) print(" %s" % dill_version) print(" %s" % pox_version) print(" %s" % mp_version) #print(" %s (optional)" % mystic_version) #print(" %s (optional)" % pyina_version) print("***********************************************************\n") if __name__=='__main__': pass # end of file uqfoundation-pathos-33e3f91/tox.ini000066400000000000000000000004401467657623600174200ustar00rootroot00000000000000[tox] skip_missing_interpreters= True envlist = py38 py39 py310 py311 py312 py313 pypy38 pypy39 pypy310 [testenv] deps = # numpy whitelist_externals = # bash commands = {envpython} -m pip install . {envpython} pathos/tests/__main__.py uqfoundation-pathos-33e3f91/version.py000066400000000000000000000061521467657623600201520ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2022-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pathos/blob/master/LICENSE __version__ = '0.3.3'#.dev0' __author__ = 'Mike McKerns' __contact__ = 'mmckerns@uqfoundation.org' def get_license_text(filepath): "open the LICENSE file and read the contents" try: LICENSE = open(filepath).read() except: LICENSE = '' return LICENSE def get_readme_as_rst(filepath): "open the README file and read the markdown as rst" try: fh = open(filepath) name, null = fh.readline().rstrip(), fh.readline() tag, null = fh.readline(), fh.readline() tag = "%s: %s" % (name, tag) split = '-'*(len(tag)-1)+'\n' README = ''.join((null,split,tag,split,'\n')) skip = False for line in fh: if line.startswith('['): continue elif skip and line.startswith(' http'): README += '\n' + line elif line.startswith('* '): README += line.replace('* ',' - ',1) elif line.startswith('-'): README += line.replace('-','=') + '\n' elif line.startswith('!['): # image alt,img = line.split('](',1) if img.startswith('docs'): # relative path img = img.split('docs/source/',1)[-1] # make is in docs README += '.. image:: ' + img.replace(')','') README += ' :alt: ' + alt.replace('![','') + '\n' #elif ')[http' in line: # alt text link (`text `_) else: README += line skip = line.endswith(':\n') fh.close() except: README = '' return README def write_info_file(dirpath, modulename, **info): """write the given info to 'modulename/__info__.py' info expects: doc: the module's long_description version: the module's version string author: the module's author string license: the module's license contents """ import os infofile = os.path.join(dirpath, '%s/__info__.py' % modulename) header = '''#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/%s/blob/master/LICENSE ''' % modulename #XXX: author and email are hardwired in the header doc = info.get('doc', None) version = info.get('version', None) author = info.get('author', None) license = info.get('license', None) with open(infofile, 'w') as fh: fh.write(header) if doc is not None: fh.write("'''%s'''\n\n" % doc) if version is not None: fh.write("__version__ = %r\n" % version) if author is not None: fh.write("__author__ = %r\n\n" % author) if license is not None: fh.write("__license__ = '''\n%s'''\n" % license) return