pax_global_header00006660000000000000000000000064146766004030014520gustar00rootroot0000000000000052 comment=c629452ee3d1fc377fb8938463aafcfe81c2f4bf uqfoundation-pyina-c629452/000077500000000000000000000000001467660040300156325ustar00rootroot00000000000000uqfoundation-pyina-c629452/.codecov.yml000066400000000000000000000014771467660040300200660ustar00rootroot00000000000000comment: false coverage: status: project: default: # Commits pushed to master should not make the overall # project coverage decrease by more than 1%: target: auto threshold: 1% patch: default: # Be tolerant on slight code coverage diff on PRs to limit # noisy red coverage status on github PRs. # Note The coverage stats are still uploaded # to codecov so that PR reviewers can see uncovered lines # in the github diff if they install the codecov browser # extension: # https://github.com/codecov/browser-extension target: auto threshold: 1% fixes: # reduces pip-installed path to git root and # remove dist-name from setup-installed path - "*/site-packages/::" - "*/site-packages/pyina-*::" uqfoundation-pyina-c629452/.coveragerc000066400000000000000000000010521467660040300177510ustar00rootroot00000000000000[run] # source = pyina include = */pyina/* omit = */tests/* */info.py branch = true # timid = true # parallel = true # and need to 'combine' data files # concurrency = multiprocessing # thread # data_file = $TRAVIS_BUILD_DIR/.coverage # debug = trace [paths] source = pyina */site-packages/pyina */site-packages/pyina-*/pyina [report] include = */pyina/* exclude_lines = pragma: no cover raise NotImplementedError if __name__ == .__main__.: # show_missing = true ignore_errors = true # pragma: no branch # noqa uqfoundation-pyina-c629452/.gitignore000066400000000000000000000001021467660040300176130ustar00rootroot00000000000000.tox/ .cache/ *.egg-info/ *.pyc pyina/info.py dist/ README .idea/ 
uqfoundation-pyina-c629452/.readthedocs.yml000066400000000000000000000005151467660040300207210ustar00rootroot00000000000000# readthedocs configuration file # see https://docs.readthedocs.io/en/stable/config-file/v2.html version: 2 # configure sphinx: configuration: docs/source/conf.py # build build: os: ubuntu-22.04 tools: python: "3.10" # install python: install: - method: pip path: . - requirements: docs/requirements.txt uqfoundation-pyina-c629452/.travis.yml000066400000000000000000000040241467660040300177430ustar00rootroot00000000000000dist: jammy os: linux sudo: required language: python matrix: include: - python: '3.8' env: - RDMAV_FORK_SAFE=1 - python: '3.9' env: - COVERAGE="true" - RDMAV_FORK_SAFE=1 - python: '3.10' env: - RDMAV_FORK_SAFE=1 - python: '3.11' env: - RDMAV_FORK_SAFE=1 - python: '3.12' env: - RDMAV_FORK_SAFE=1 - python: '3.13-dev' env: - CYTHON="true" # numpy source build - DILL="master" - RDMAV_FORK_SAFE=1 - python: 'pypy3.8-7.3.9' # at 7.3.11 env: - RDMAV_FORK_SAFE=1 - python: 'pypy3.9-7.3.9' # at 7.3.16 env: - RDMAV_FORK_SAFE=1 - python: 'pypy3.10-7.3.17' env: - RDMAV_FORK_SAFE=1 allow_failures: - python: 'pypy3.9-7.3.9' # undefined symbol - python: 'pypy3.10-7.3.17' # CI missing fast_finish: true cache: pip: true apt: true before_install: - set -e # fail on any error - sudo apt-get update -q - set -x; sudo apt-get install -y -q mpich libmpich-dev # openmpi-bin libopenmpi-dev - if [[ $COVERAGE == "true" ]]; then pip install coverage; fi - if [[ $CYTHON == "true" ]]; then pip install "cython<0.29.25"; fi #FIXME - if [[ $DILL == "master" ]]; then pip install "https://github.com/uqfoundation/dill/archive/master.tar.gz"; fi install: - python -m pip install . 
script: - for test in pyina/tests/__init__.py; do echo $test ; if [[ $COVERAGE == "true" ]]; then coverage run -a $test > /dev/null; else python $test > /dev/null; fi ; done - for test in pyina/tests/test_*.py; do echo $test ; if [[ $COVERAGE == "true" ]]; then coverage run -a $test > /dev/null; else python $test > /dev/null; fi ; done after_success: - if [[ $COVERAGE == "true" ]]; then bash <(curl -s https://codecov.io/bash); else echo ''; fi - if [[ $COVERAGE == "true" ]]; then coverage report; fi uqfoundation-pyina-c629452/LICENSE000066400000000000000000000033761467660040300166500ustar00rootroot00000000000000Copyright (c) 2004-2016 California Institute of Technology. Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. All rights reserved. This software is available subject to the conditions and terms laid out below. By downloading and using this software you are agreeing to the following conditions. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - Neither the names of the copyright holders nor the names of any of the contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. uqfoundation-pyina-c629452/MANIFEST.in000066400000000000000000000003621467660040300173710ustar00rootroot00000000000000include LICENSE include README* include MANIFEST.in include pyproject.toml include tox.ini include version.py recursive-include docs * recursive-include examples * recursive-include scripts * include .* prune .git prune .coverage prune .eggs uqfoundation-pyina-c629452/README.md000066400000000000000000000142461467660040300171200ustar00rootroot00000000000000pyina ===== MPI parallel map and cluster scheduling About Pyina ----------- The ``pyina`` package provides several basic tools to make MPI-based parallel computing more accessable to the end user. The goal of ``pyina`` is to allow the user to extend their own code to MPI-based parallel computing with minimal refactoring. The central element of ``pyina`` is the parallel map algorithm. ``pyina`` currently provides two strategies for executing the parallel-map, where a strategy is the algorithm for distributing the work list of jobs across the availble nodes. These strategies can be used *"in-the-raw"* (i.e. directly) to provide the map algorithm to a user's own mpi-aware code. Further, in ``pyina.mpi`` ``pyina`` provides pipe and map implementations (known as *"easy map"*) that hide the MPI internals from the user. With the *"easy map"*, the user can launch their code in parallel batch mode -- using standard Python and without ever having to write a line of MPI code. 
There are several ways that a user would typically launch their code in parallel -- directly with ``mpirun`` or ``mpiexec``, or through the use of a scheduler such as *torque* or *slurm*. ``pyina`` encapsulates several of these *"launchers"*, and provides a common interface to the different methods of launching a MPI job. ``pyina`` is part of ``pathos``, a Python framework for heterogeneous computing. ``pyina`` is in active development, so any user feedback, bug reports, comments, or suggestions are highly appreciated. A list of issues is located at https://github.com/uqfoundation/pyina/issues, with a legacy list maintained at https://uqfoundation.github.io/project/pathos/query. Major Features -------------- ``pyina`` provides a highly configurable parallel map interface to running MPI jobs, with: * a map interface that extends the Python ``map`` standard * the ability to submit batch jobs to a selection of schedulers * the ability to customize node and process launch configurations * the ability to launch parallel MPI jobs with standard Python * ease in selecting different strategies for processing a work list Current Release [![Downloads](https://static.pepy.tech/personalized-badge/pyina?period=total&units=international_system&left_color=grey&right_color=blue&left_text=pypi%20downloads)](https://pepy.tech/project/pyina) [![Stack Overflow](https://img.shields.io/badge/stackoverflow-get%20help-black.svg)](https://stackoverflow.com/questions/tagged/pyina) --------------- The latest released version of ``pyina`` is available at: https://pypi.org/project/pyina ``pyina`` is distributed under a 3-clause BSD license. 
Development Version [![Support](https://img.shields.io/badge/support-the%20UQ%20Foundation-purple.svg?style=flat&colorA=grey&colorB=purple)](http://www.uqfoundation.org/pages/donate.html) [![Documentation Status](https://readthedocs.org/projects/pyina/badge/?version=latest)](https://pyina.readthedocs.io/en/latest/?badge=latest) [![Build Status](https://travis-ci.com/uqfoundation/pyina.svg?label=build&logo=travis&branch=master)](https://travis-ci.com/github/uqfoundation/pyina) [![codecov](https://codecov.io/gh/uqfoundation/pyina/branch/master/graph/badge.svg)](https://codecov.io/gh/uqfoundation/pyina) ------------------- You can get the latest development version with all the shiny new features at: https://github.com/uqfoundation If you have a new contribution, please submit a pull request. Installation ------------ ``pyina`` can be installed with ``pip``:: $ pip install pyina A version of MPI must also be installed. Launchers in ``pyina`` that submit to a scheduler will throw errors if the underlying scheduler is not available, however a scheduler is not required for ``pyina`` to execute. Requirements ------------ ``pyina`` requires: * ``python`` (or ``pypy``), **>=3.8** * ``setuptools``, **>=42** * ``cython``, **>=0.29.30** * ``numpy``, **>=1.0** * ``mpi4py``, **>=1.3** * ``dill``, **>=0.3.9** * ``pox``, **>=0.3.5** * ``pathos``, **>=0.3.3** More Information ---------------- Probably the best way to get started is to look at the documentation at http://pyina.rtfd.io. Also see https://github.com/uqfoundation/pyina/tree/master/examples and ``pyina.tests`` for a set of scripts that demonstrate the configuration and launching of mpi-based parallel jobs using the *"easy map"* interface. You can run the tests with ``python -m pyina.tests``. A script is included for querying, setting up, and tearing down an MPI environment, see ``python -m pyina`` for more information. 
The source code is generally well documented, so further questions may be resolved by inspecting the code itself. Please feel free to submit a ticket on github, or ask a question on stackoverflow (**@Mike McKerns**). If you would like to share how you use ``pyina`` in your work, please send an email (to **mmckerns at uqfoundation dot org**). Important classes and functions are found here: * ``pyina.mpi`` [the map API definition] * ``pyina.schedulers`` [all available schedulers] * ``pyina.launchers`` [all available launchers] Mapping strategies are found here: * ``pyina.mpi_scatter`` [the scatter-gather strategy] * ``pyina.mpi_pool`` [the worker pool strategy] ``pyina`` also provides a convience script that helps navigate the MPI environment. This script can be run from anywhere with:: $ mpi_world If may also be convienent to set a shell alias for the launch of 'raw' mpi-python jobs. Set something like the following (for bash):: $ alias mpython1='mpiexec -np 1 `which python`' $ alias mpython2='mpiexec -np 2 `which python`' $ ... Citation -------- If you use ``pyina`` to do research that leads to publication, we ask that you acknowledge use of ``pyina`` by citing the following in your publication:: M.M. McKerns, L. Strand, T. Sullivan, A. Fang, M.A.G. Aivazis, "Building a framework for predictive science", Proceedings of the 10th Python in Science Conference, 2011; http://arxiv.org/pdf/1202.1056 Michael McKerns and Michael Aivazis, "pathos: a framework for heterogeneous computing", 2010- ; https://uqfoundation.github.io/project/pathos Please see https://uqfoundation.github.io/project/pathos or http://arxiv.org/pdf/1202.1056 for further information. 
uqfoundation-pyina-c629452/docs/000077500000000000000000000000001467660040300165625ustar00rootroot00000000000000uqfoundation-pyina-c629452/docs/Makefile000066400000000000000000000012351467660040300202230ustar00rootroot00000000000000# Minimal makefile for Sphinx documentation # # You can set these variables from the command line. SPHINXOPTS = SPHINXBUILD = sphinx-build SPHINXPROJ = pyina SOURCEDIR = source BUILDDIR = build # Internal variables ALLSPHINXOPTS = $(SPHINXOPTS) $(SOURCEDIR) # Put it first so that "make" without argument is like "make help". help: @echo "Please use \`make html' to generate standalone HTML files" .PHONY: help clean html Makefile clean: -rm -rf $(BUILDDIR) html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR) -rm -f $(BUILDDIR)/../../scripts/_*py -rm -f $(BUILDDIR)/../../scripts/_*pyc -rm -rf $(BUILDDIR)/../../scripts/__pycache__ uqfoundation-pyina-c629452/docs/requirements.txt000066400000000000000000000023441467660040300220510ustar00rootroot00000000000000# Packages required to build docs # dependencies pinned as: # https://github.com/readthedocs/readthedocs.org/blob/d3606da9907bb4cd933abcf71c7bab9eb20435cd/requirements/docs.txt alabaster==0.7.16 anyio==4.4.0 babel==2.15.0 certifi==2024.7.4 charset-normalizer==3.3.2 click==8.1.7 colorama==0.4.6 docutils==0.20.1 exceptiongroup==1.2.1 h11==0.14.0 idna==3.7 imagesize==1.4.1 jinja2==3.1.4 markdown-it-py==3.0.0 markupsafe==2.1.5 mdit-py-plugins==0.4.1 mdurl==0.1.2 myst-parser==3.0.1 packaging==24.0 pygments==2.18.0 pyyaml==6.0.1 readthedocs-sphinx-search==0.3.2 requests==2.32.3 six==1.16.0 sniffio==1.3.1 snowballstemmer==2.2.0 sphinx==7.3.7 sphinx-autobuild==2024.4.16 sphinx-copybutton==0.5.2 sphinx-design==0.6.0 sphinx-hoverxref==1.4.0 sphinx-intl==2.2.0 sphinx-multiproject==1.0.0rc1 sphinx-notfound-page==1.0.2 sphinx-prompt==1.8.0 sphinx-rtd-theme==2.0.0rc2 sphinx-tabs==3.4.5 sphinxcontrib-applehelp==1.0.8 sphinxcontrib-devhelp==1.0.6 sphinxcontrib-htmlhelp==2.0.5 
sphinxcontrib-httpdomain==1.8.1 sphinxcontrib-jquery==4.1 sphinxcontrib-jsmath==1.0.1 sphinxcontrib-qthelp==1.0.7 sphinxcontrib-serializinghtml==1.1.10 starlette==0.37.2 tomli==2.0.1 typing-extensions==4.12.1 urllib3==2.2.2 uvicorn==0.30.0 watchfiles==0.22.0 websockets==12.0 uqfoundation-pyina-c629452/docs/source/000077500000000000000000000000001467660040300200625ustar00rootroot00000000000000uqfoundation-pyina-c629452/docs/source/_static/000077500000000000000000000000001467660040300215105ustar00rootroot00000000000000uqfoundation-pyina-c629452/docs/source/_static/css/000077500000000000000000000000001467660040300223005ustar00rootroot00000000000000uqfoundation-pyina-c629452/docs/source/_static/css/custom.css000066400000000000000000000001251467660040300243220ustar00rootroot00000000000000div.sphinxsidebar { height: 100%; /* 100vh */ overflow: auto; /* overflow-y */ } uqfoundation-pyina-c629452/docs/source/conf.py000066400000000000000000000203651467660040300213670ustar00rootroot00000000000000# -*- coding: utf-8 -*- # # pyina documentation build configuration file, created by # sphinx-quickstart on Tue Aug 8 06:50:58 2017. # # This file is execfile()d with the current directory set to its # containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
# import os from datetime import datetime import sys scripts = os.path.abspath('../../scripts') sys.path.insert(0, scripts) try: os.symlink(scripts+os.sep+'ezscatter', scripts+os.sep+'_ezscatter.py') os.symlink(scripts+os.sep+'ezpool', scripts+os.sep+'_ezpool.py') os.symlink(scripts+os.sep+'mpi_world', scripts+os.sep+'_mpi_world.py') except: pass # Mock mpi4py try: from unittest.mock import MagicMock except ImportError: from mock import Mock as MagicMock class Mock(MagicMock): @classmethod def __getattr__(cls, name): return MagicMock() MOCK_MODULES = ['mpi4py'] sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) # Import the project import pyina # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.imgmath', 'sphinx.ext.ifconfig', 'sphinx.ext.napoleon'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. 
project = 'pyina' year = datetime.now().year copyright = '%d, The Uncertainty Quantification Foundation' % year author = 'Mike McKerns' # extension config github_project_url = "https://github.com/uqfoundation/pyina" autoclass_content = 'both' autodoc_default_options = { 'members': True, 'undoc-members': True, 'private-members': True, 'special-members': True, 'show-inheritance': True, 'imported-members': True, 'exclude-members': ( '__dict__,' '__slots__,' '__weakref__,' '__module__,' '_abc_impl,' '__init__,' '__annotations__,' '__dataclass_fields__,' ) } autodoc_typehints = 'description' autodoc_typehints_format = 'short' napoleon_include_private_with_doc = False napoleon_include_special_with_doc = True napoleon_use_ivar = True napoleon_use_param = True # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = pyina.__version__ # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = 'en' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # If true, `todo` and `todoList` produce output, else they produce nothing. 
todo_include_todos = False # Configure how the modules, functions, etc names look add_module_names = False modindex_common_prefix = ['pyina.'] # -- Options for HTML output ---------------------------------------------- # on_rtd is whether we are on readthedocs.io on_rtd = os.environ.get('READTHEDOCS', None) == 'True' # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # if not on_rtd: html_theme = 'alabaster' #'bizstyle' html_css_files = ['css/custom.css',] #import sphinx_rtd_theme #html_theme = 'sphinx_rtd_theme' #html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] else: html_theme = 'sphinx_rtd_theme' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = { 'github_user': 'uqfoundation', 'github_repo': 'pyina', 'github_button': False, 'github_banner': True, 'travis_button': True, 'codecov_button': True, 'donate_url': 'http://uqfoundation.org/pages/donate.html', 'gratipay_user': False, # username 'extra_nav_links': {'Module Index': 'py-modindex.html'}, # 'show_related': True, # 'globaltoc_collapse': True, 'globaltoc_maxdepth': 4, 'show_powered_by': False } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. 
# # This is required for the alabaster theme # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars if on_rtd: toc_style = 'localtoc.html', # display the toctree else: toc_style = 'globaltoc.html', # collapse the toctree html_sidebars = { '**': [ 'about.html', 'donate.html', 'searchbox.html', # 'navigation.html', toc_style, # defined above 'relations.html', # needs 'show_related':True option to display ] } # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. htmlhelp_basename = 'pyinadoc' # Logo for sidebar html_logo = 'pathos.png' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'pyina.tex', 'pyina Documentation', 'Mike McKerns', 'manual'), ] # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'pyina', 'pyina Documentation', [author], 1) ] # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ (master_doc, 'pyina', 'pyina Documentation', author, 'pyina', 'MPI parallel map and cluser scheduling.', 'Miscellaneous'), ] # Example configuration for intersphinx: refer to the Python standard library. 
intersphinx_mapping = {'https://docs.python.org/3/': None} # {'python': {'https://docs.python.org/': None}, # 'mystic': {'https://mystic.readthedocs.io/en/latest/', None}, # 'pathos': {'https://pathos.readthedocs.io/en/latest/', None}, # 'pox': {'https://pox.readthedocs.io/en/latest/', None}, # 'dill': {'https://dill.readthedocs.io/en/latest/', None}, # 'multiprocess': {'https://multiprocess.readthedocs.io/en/latest/', None}, # 'ppft': {'https://ppft.readthedocs.io/en/latest/', None}, # 'klepto': {'https://klepto.readthedocs.io/en/latest/', None}, # } uqfoundation-pyina-c629452/docs/source/index.rst000066400000000000000000000004631467660040300217260ustar00rootroot00000000000000.. pyina documentation master file pyina package documentation =========================== .. toctree:: :hidden: :maxdepth: 2 self pyina scripts .. automodule:: pyina .. :exclude-members: + Indices and tables ================== * :ref:`genindex` * :ref:`modindex` * :ref:`search` uqfoundation-pyina-c629452/docs/source/pathos.png000066400000000000000000002314661467660040300221020ustar00rootroot00000000000000PNG  IHDRI("WCiCCPICC ProfilexXy8?L-0kf0'EٗDEDDHRHJ$zs>9>ss&A&?J6#>eii KYy `Ԯ)79rLGm8B@2>T)KX@ ޻vq;ཀྵC= @~==~N#AY_B鲮Au\*i@6\PIK? 8h[+wo ys064#ۋPlilnoomooD|p`Ȏ, }(9Pa.x<I#5(gbflU 49-$N? /꠿ؖD$T lU%r# j3BU-SlzeƬ&*!+zkc6wlGԏtvjqreuSte=f>\2*Dm?cs:<8H,"6<"?xJ%6j3vgbXb |b|l%vq WSSCӊ{fgfd^IͿ~/>`K⍭t%enIޖzW\G+ԫT+=~$Y'X~_OX +_mz\ђ,ܵM][gcW nް/* S_x}P[?]V!gDnc}tcX8ç ']jui*o=ӄLwͳvS?b5,a+W=~ot6ކ]|zۨ .%u+Ef [+g .(_B`DDl+KKݓޒ5;"7hܪ눔zF 斖Abp0֨ɈOs. iK=+'kʱK6ٶ۷;:>irCm{,G/0Xo}PycB T܏=)R q7CR2(YbrJ &Jf*jG0,_G uNӗ52DM4YZXYsa3dhW`pM']]/<4q\lOcmA >̈́ēD-p!185;r!X-GHqSKXs8%B,]d)쩹tmgRϺHl=wxQ=;|2%h1cEimbΖ5{uO@)p P@@4 vLبxp3_X {&# (`a0_%MX3#l.gEVpFG# H7d@#JHJӁBT-jV6vN. 
0}}# ;C5#=;#4aaJcZg1hcfbcgKbcf_ q|&s/e;Wo?!%+Xz!eL>'aVEDD$ J z"Y"U(}_YܢLģWED4Ǖ5:b3Gi]ж}H?!Dkbaeiheep gCK/uxr3[{ {{{7A'| ROF0GFv1vLy FRg`=[_TyQ}]ʂj:|Z&;5zg+^]v8)Wo3Ms= 2W|dMo5`H 0 eî`e8/{F ыF"Ld (M$M:* DKGkE{3ygMOLLre,,y{X D˱?PЙRKUirPcUК"Ȉ"h3!z: M8hQ#/Bo30 0j3VLL9,,ͬlمٟrrlqr@k;Gg:?Z=={L18jTSWP=!Ґvɖ})VQ U@_uAM@P#[sYO|i96ٌ`*{XS[WW/WaDcWfm_;^< ,{f[FF6u[B$3K2S}yz 'B<3@1 a <@ ^@ x &A^ ؑ#e&-)pb^Ȏ/t1]q#{GvJן֎G?&P뿙]K'$Ѡz{p"$~)ΜH $ 0Hn$?B* UZHMԧ3S;w,cM8@Έ/}عC P-VbB P 9qݶaJh:`]NBSx]ƊπΚegt ^qYV" YbVTTl%ϲb=k`T"AryDl]nkok}!۳wu (% ***K 4$QV (C`۫Z`ih.{姴T@am>(-I@dqqMdg vq>S' >OdZY~`($q>&! CC0R7g{0_UH)('XIt y2ǒ2a!z:CͿ"m=)~h3};پ;oևZV>we ZsE~uz`"t$!b~^NֽiΜeEjz/}ť$=JPQcI3-TR$voG>wqkgv)^_( Gee;lw(D,).ur@R#1dٮ],jN>$@OAb IG _:\F &UU%e**Wm+<ě/Rd*+,IpќܖΆvu. αɯK+k AḀd(T^ٮQ"6ƒIV|U,_b__oy%CF{ 1q:52 hGKq9@͒4`z D4t)fpaƢ@RؑOpjZ: IvX!vɔMmIx7iuNUpT"VB^<7_ \3V,[n]JTCl$btڥKPRf:|~Q嫡΋7֯V:U_gկ|}%$MHE}JWb;8dڶkChx_oUͷ>݊WcEN4V2 2qD Gji$UVԛzFTJmzAġH NDŽ >> ]s`Rzd$emf :{(I2psI2$y1Q` rʖ)? pH.&#s̱E>z(zT+V~O%S E`e?|p6O6GNWmחc=1.ʧڜO|.|^' zcs\@fZ+T<581*dtR+ m߿ȦmV2V_Ҫy^ "xo/y1S6.#.m}گvM.LQj@]u9L"Qo驆 nw-tlCHE$Nܸ$ĚHÔ"iLmIJM"IP(IN3{Qb`Wc@2_L'AG%q^7&MlR++eT0%wJ58؃<_ֳ]߂߯,)γɦ^0%tNJ$7~m{A~Eыkl7fVqjttϊ4z4@>w|M[F'O6JT:i9hJŠt^G|"EH@5;$P_ZÈ4t< qKf# !j@KFީ Jwj+/PЅ(@i-6ٗ8/+p `BF#*&Eb'FzuW5ՁQa\-69ȌjD'Q `coFT4u`]_R IE;/^>.<H%( .Ê6M6os !cFXAKXaG^p4(29I;YQS+) Ss3yR9.E*3]oV%t"Iz@4D sUo͐Yc7m0ɜBw on}uNO6 ?/)^ilû>-̈%WGN=(3n &Ou$J_hS D3}1ɈZZ 8*&rڋWXKt@ƚ*S$NkUk8.嗆Ȁ c!‛ M&*P=$Ϩ+YpIn6ǟ"4,Pɫ<%j:L!BeR "OD m;^ߡHHn #e؛V5kMĉUV\ч hFb%?)/B,dV91_SLl>?wBgX'eji&GZ-5RB Hbs[]a-?RVnθ㿬x^#\Ɍ{p N&4ΖVN1[6oE䃴 HrQ5ht4@ƝlxO#Euܩm~mnmH0.#F0Lt3NP$0)MX% Cπ(!T^; PX ED"}S*# [?)Z`]ulY6Iӈ*4P6ҝǩ5:d ]pHyBÁȥ/}L BaoF3^HA}3)-A/uc 5HCMx}Ix,[ a >D@c#_/~~/Yb.1;HA)\M\@})M'udFMq:8p#KMة%OE:p  )3nh`&pcbvi&I.-q&0{Tm$Jm3''~ ^`/6s DCep(:ꖁXfRB1(k>d[ui^$T8pT=R_$pLػ68ºZ"VsMmOOp'j a=kCmni/~nb5:\`dEQfai=nko]lvij>htHPc>Ƣ=z8|/?h?$ȉ.0ū(.r =P1GHq Z3ءLw6o\>mMTɀQw'=&re\3umů.5u+,Uϻ\ U$La:nL@B3>t5_<c?*^] c.66\omR͛N N ]H7JLAv/5z}NH7*̣U' pt<OBTT)D AŒ%I XPi>q)fӆy2(Y z{n6}L3Z` 
y{=dwmtT&;Vly_ف_Ob&$Ɗϲ߿]U hY$Kb|O:& mWT5"ĉ)UmcڧBCv}B'􇣩0Hnme >Z`2H;vtɱYFYHEJ{?L-sd_3f:-A+FYt ad;v*\Q0"p^Nf|ZZt!p +ܮҳ9#S`CEh4ŸLFdbO0n߱v#nTp!6|C|b8<>8*\zPyyK_tɀOH? )DtVy='CѮ={_Ϸ)ZXQ?dL %^2&$zIwuVau4 }Aݱm%iE2F},GvW%i)ZFHCsiF.1.զ.U=ˬEFwivlH6U2g2J16h|%d -vp>Z8qD]dwPS{:۬UڑzkHwڷfSxhi#ؼzwbTe+:3̓y$[W|N(S= s [?𯾔mz+ʨѠ|HL(4L%6Q#Rv!iq:tkvɀLp.éC1ugz$)d+>Ξ))1NVpDDh肃 9{h?kG*#}94XQ6^V&0>e;Amٶ/lP57)^i{1 @Nv%^J!`_^t׳~5FBc"Şc^gQ9U:z"*lN??Ts*OF 4V Oڭ73.ihPA6/sӸCRFM.߹c7BVhbGҌwGrM5"5h0f℉N6|@0`mYHj2Ϥ_{{%-CLg:?:GsCnde 8lߊ. U&"qAhPAl& W_}]~+/~ͫoUUX'j¢Av뉜d?,?eZ LTf{ֵXG| -d!mcT6i ]7p P F[/ Z?U' m*Ӂ/'lӚ_6 <){PpCX.S~:v3q%,_q@x%i[8UHx%ݯu}2 C{6PްYrZ KkI~'3@@-d@@З6bϓᛃ?4WʀVƪ"@ƘQP4lμPg0TXbPo[͋_gE53Nzڛ 緿[nz]@]JF) )^i0ajԊbrg8_DtZ!tk5O`X=JGHr0U6óe]F:,cqu$\RCe!J_@H,tR4ͧCeyfga{寴e6glIe6$}RBd-vD+R7lѣy`i,܈F"8*֦RR jR9Tv3W2u(V\3E:U IDATUg[Aʇ>C?`{ ]3- .q]\S~[EwEIa7}{GmiIc S2av?䙼s xmF~T;tw`$qLj5F&MҞUDՒAl 6hw̌ 88͒ټM@٬@iV5I._ KyYTOW*OBB@Igf R"4n:EϋSQ}ֹF]ЪԨC0Mm\%UR>,tWqCXU@IJ5H@(|;:ԚU*wBRg(՜"^S֎Eo>2D#?T2w:,SXm-kKi0wT:С^$\-G\-mݲ֬]#Fd%#A$˗ 5gvf.QGW|w^vh$y<`f7$zwHL?Pq}xsIzxp֊ʌ>gQVaL:4AӧLHg(Lˣ/;/)nL efΜiO;,ڏnV\iv\ʖTו~]z-=y-[nW>.̀ L|hyH2U bŻaZ[l+='^_ъo@sߏe?|MX3Tdwtxz,*V~VM"wXFNN.j[p܈@AFI-Pmxs; SIE WtC ̃Qˎ [%*Fʪg^"b2  F>XV#׬1#[+[mڄv^]mD9͆l;5N <nzUP3gzÖq|>_h۷mtlA<_ҶKlض6Jn@nmyc tK;$0T6ve3fҀLOA Lw$ā wv؟x/O7,ZϝVzd :wz-D<\iP%M3g؜stI/xr\r_AAU]8P`lcׯ~JmܲYwZ:zҮOtx-+|LV\iz9cs.,=-8(y#!}$I:9qC)N<+PH$ǀ $+8h t(Sa xyioeXrt?#5%z~c؆G=0TX< ௕Y 7f}pTbi($QNzB0z'IXcW<7o?GWj>c 20T?ye sE5$QTe$\,u2dUetlEqpަmz͛6B4:TLfZv̪8 UI^I~O~mh5Z ηB{R#1Κ!ZG h~#M68B4xWrG+x d -wiFKJ]!d-nͶ 8Efz:S*Zus܄qkR|SDsRW@J]8 Fޜm:2Ө8Uga~:M:y5ѥz/Zb(AjW@y-R^]43Kdy_O6G W>[ο?7Mcv+4NL4yLw< S9V/CWK !o :HJd5{=OY 8#Of'5_P[K{@9V^`_5xH6]>~% 5?[%Բ/!R/Vn|8z`!~ze4%hP*ɘ&t:Y̻SR MA Bm2AdHg9쎀'I.UkT%pf _Ial.f:0H< u ^J65* .yHa?nhAߏ>\=@ą4'~rްM Wfs~Zj@IZg[#,{T55N|Pٜ:udفYl\XbA]f0 턲Ń .]̻w#)17s0He:Fz|PoevQT4wѳNR8ɀM˚5@nj;l;6(}Z/K ֮@ iHObϻ;Y3%~ߜ 'YybFAB~m{H DKFq>@d$rP^D]N\D쎯 4+tgj.}*Cc"p #nЫ |t޻&ȡQ:a % Z<;HRk9\1a(F5Ja|83WEBozF!p8ԽEvHԝ֪'ߩILׁy ˮd4?*IYKZPWo6wc?`"c%>@xi:yqtMRձYPBߡEA6^^/⻑^x Ri7(O"(/ 
bk2mUk5I~D?Wy[luPwS%c:eKH ;}:_o@)*.% $Hg/ux4] 4 }c/SK+z ҬZ%9?‹l%R/g1S08EQkAM ='Fr_)!K] Pzpx–㠱f\"UO:S@1L= p LJ=t3{n|+; 8U^H瀩 (wsu `1QuEtv$oKh/{W ~N!E4}b>F%m}__W~(i,^Pғ$ P"K3}0qå%Mt礨<ô0> K})ħr`yI#ti)Heۚ]=B-4?_H6mJ5]d }VO]v|PjgYh?}=c 3u1l\M`]{g.ٯ3 F朘DGR(X4B?䤔oDo "2 Q!-v>gwka$EkN+G`P960R!{rIǥH"O9M($aŶN^쿹f3ʠ[nQ-WxI ޒX(OWnl\״t @/PbN鹰Z5g)N+_4NI>nk >w9J ,t3 zu=Ț/OhP84xs W%OQ(P#x}V VٔWmoz.8KB E0sMtF$(G_ͽ{Rh=ˤ.a+?˴~ꗾjwFZz2S=՛P8#(<@( L#=::kʨlThA vh]AA?9q:,Ɏ&;} ?+: OrI09|iCd)#>/{(ט5u@Η:6/hܨ/|?"]([!~@ 6hFz[y=9 ֦4y `-'%ghMeAdxA$o|0 a @'x/~/ !E<ՒfHz)) : ;8Yň"r¼0HH/ z4P @KV <. jL|B<F= I [&}l[|)\aiN?L;Yѩ{#6/mspL.v tU*S݋B44Dc]yB0YRxe^B[ϓΟk -+PQ`ZB@8\]&K4ol{ֶ`(B;qfv7k5*?6`RH1(k4%8ũȌ0R I E  @{I}6ﴋ_ y#PBS@Bq @H# " \ vO';]Njd< )KLf?ۖ8ퟛJTZn^ pOS>[(= 7P z5A/h tWpBW#Qx&q>7Q9Qhf IDATH'߉k s‡O$yA6sG{&>uˌnvmחnz[J CKTCHA})J+-[3t2L|!cDH ;m|ں7j3=NU۟dlt[ |`VbkW9[z[DW"PHw/}(BS(jOz[j4?a>;a @prQLHaB<;i!wOd1i鴸9HRbt! J;h P`k QԶb⅋. 鲑Cڍ=:wn?@ =4=T'NiFptKM~"o'bqw?C'7Wt1.+aჴ]  zӍ$~D*Ųv5?|jIv\S  9Vul/EOYuŶ~[w)|t*&m~-]UJEߕ8M׽޷רd{mN0sp!G00bFH9 ;  G&jEQFIWV17ѓS2TKgA-7{ 6M?k Z|23R AԦ8f~ܨO@ug!N5غ"A ]d29@JgYsϴ o՗^aN=%@D󯟷m\,ǽ{eU^Cs09 2v{Ga4HWK",<U_ER!ON]ZW[-WZW79\qm>`4U5e/@\=<~"M=g|AA#hK;dUژ>-?}wtp~hgVo`7,]ciSfLk F^7j" ,3nV9`g0gˉt:umD`LI'.}R ɪYz۶^{k^k_lk}s|~y6s#Vqj9QIxГ(0|GKm_;P01H[H, cBA B[C:lG$>몗JL0`l>J*-#1A[R,UE`Y)._}Ze@Au{Es3~NB>ɗ.ЋqeAZ" қ0u)%A6?H5k%Cӈ4^YKL? ۶Tugh#,zQjܖ঩o֫܃m?ܘ|QQ1kJW(Ufo@QX>f*ajt<huϻ^_3/uZ*6gnɒ$+00_]̙rF<—FyThDPt_| 6)^5@v'zGΟ1I rb)Oci$Y$AVpU Ѻz6:]Tpu W |wꪛQn ]W[[߰O\kE3%E,'tftkeF a `s}ӰaQ}r\>7hhWE'>ܥC:h' Jz&q|jV4GS 'ghua#%yL#qEW3ۦ^<mX–S?lt(fG{tس%0]lLp5 Ґ vTmMP? 
'ilƎyqrvlÚ>9LN<#f.]ZuPja,~i@R\wOu 摤}((KZm[<(9:z}0!?t#a)$d[Nr^_zH h92$ݟG,㪘es²'%ӵT}nwtΑb>U ڠgq㕩2Q Ơ(O(eE_lP)ly0H΃4١CtqꅎДp: EE9X5ІY'„#x KH35~ FHlČ5;;](9BO;p;@zz.BmGx)h~_:gw~ͪ.^lc:wǰ8$p5G#T=b(Jl aD*/T<hA& MC"Q*Jg-n.ŨR]n DFZm   O&Qbl-;n-U_QuGc.vhG`EMQOH+i(N%ٜu'כ54ѬUB\DPH@UGU QAFf{6ɮS13(>x|O#5L$`tr$Wu&.5/+龧qB#J6q_+)42Q8X1؈*Zo J=O3Z:r DUfh)iPgժilK-Pry#(&!iJ7U8:ߥJ:,ؠnF:u#Р ^ vg%ׯ ..SGlǙH؍#Prہ_}OtDtJcI@2Q\׉FGaV3ÜcQ8ŵMiUyڊϓV0dIy,s%cOy94:]pƕ̺vnҡM#U8"ٌFJܧ[+ 6>oI#GuKgx}!tMN%m ,#}pXa{6jg{pw Bo/o@M}Pu<>||wp*㡞2jRqԵ r w!?ܝ<2r6HI&eܗ{鑺/](@*ő4GsyfAp*!C@ t~-6IK"} eұv)aηNi EJB8+O2ogs?x_f WC 3`%kG/A%hN'Ji"pFk: > zS8$vttt Bs@lko ҈VDT&q9zхݧ=֭ŋ<+pQ}P91byhu)&HsuWNh 1?Pg^IMm$ӎBR?NW'((e* V+-Om+f(fۿ}jʦ^vM|Cr$ l$@Ndc"IVGDiBlO~bOr3Sa[]颾W`A J{J7I 0Bdo Ns+u # Q)``ʹH'}[X5."VۨHы@*S=5P|^\y_PI+L5XOeuP;ѮRc#BNuXHf;_F]:.N5o{6$@9ĄA!8Hg@n߳z5Fs٢xKTVtWvϛ2SghJ 5Y up`ڹ|DpQVh4,TkrG$HlG "pn? $*<}aֵ+o/+O^=!)4ű>g!q𴷿*/̚W*!x$`8GyPM`[e='TWpKfR&U 1zL\ Qj2~GYyݺuc-Z?-.a:+Zҍ\gE5aJUX X4u7ýCrGꅏr-nݺU_DqݴqNqRuy ͛6Xwv&)VB:bz7ZvxM}ӹs45w5); cM/"GT`gID]Ds˱t831HkbLn^t|_ I;Ux/4Lzb}Ǭ[\q!ÑKJ@NĔ ׽u b"Os)^.$>4bL8o?V$.a 0\x2S'(#mOUH*rȿ*{ o^n ;l6!rBwk@]b2t֭[>2?cm9;HdB.^C zzen2֍֩S: 9\J#u: :w&nrаmrx945.S:я'OY3+eLuƨ}ȼSFk]>m1t9A2-vl2TT~ԥrV)hb6?{,kCΕ恽:q瀨Jyx4@PP-4ng$hAG$-D TH88.5}+4fPԁ&@Zʗ۰ ZʅHOwg*@TR.Hރ-P9ʼnc8WWFyF;̳2tXՄ hL#ƫOo}ᇭJȳacAhP[_o.ڼeҚ&a ճx=&C%0BO$h5$MpHzd@l8Td^tX-_RcFu- Ifp\N@Ј$rlyT4= QÄHEr' ^L@T=;:uk$Lޑ̡e~YN !,@ٙeھ)peT HسBc|0NUIoR͞6Ac9iLVgeMsTtZW:@:$ ͇ ԰Imf۫k)_x'wujPiبNUʵC<3ub1sM|sΣrfN /'I [N<[/8ᬐ"8Rꓱcn suh|"2X)@$BMUpDb6'bxh4b ?/!i#aF2Dq1Iʞ@{Ψf.n+g3BǦA텲7WIJ8wU6Pӧ8$;M|tP^:ذztY~ tMHPOu ;wxD )RdF`]Tn1઴cGF1cgn+eI/-ieVT*:G`UJ3Pfv X_B-Wڨ'jт>͚t?xUNk*_Ӂf; ՚ev:,ή=fq XEڸzu֑`ӡARDfi&8J[!iKІi q T]met3 F@(wf`@26&W?iWeYQ0ZURQ0"q! 
`)ЀKXGA6)(ay%6OSb66X] 82yRWݯ:Ou f34_Nh f͞e[l f؀G򜉟Uևͱ͵uvFaէE" |eu$?m~kTF*j[3Jm`bÀ&S5󣥐s882q1۞]ivEv^a/[z%$_zU5<w[a $ڱSftZ- Iy|Z1<Tݚg@: htvY>gC80 $#`KP e_!BOΠ{$I/ٳg۶m[>74nIa7mm.=x)F2 ۲~@W0ɱSv.Pi핉DHEoCӰ.sI$-2 JN ^C/]jSj4KJxāNB'ɾlϣ8+}q5=đ9l˼-7kwR]S*J.9'.h[ڣtQ~M5O*JsbJ9 %9JB9Ȧ#1 +ei'yz4O>%.VlR71˘a]ĩ"01=E{􎟈]R/b;Np$ӎP,g7 22#YNjRI1Qtr)P4+J:IDE3jD+LP|m$P~<}TGh Z (swN<$߃~)9M<%,K4۶yBo0/z .s_<콌tz^t+lC^=[rt`uA z8$I:r1to"|-ޫQ{79X"=>U4>m͜9N? :}c؉NXdG"ⵯ+W`I`ι{|j7v"8Ryt ^tI,=|}J%4~H.јGɑ 8L(!Ѓ{]%(N#YI0#AۮALYd5Ȅ QAp$|!9SU|gO…3%˲R?x<,@ z޼btd 'ǎ :}v}_Uox&:30<)*Ɏsz(tL ob|<&ynLIs4Xsye2N)7zy$z^}啲I۫W=Iqh$.eڬ ?*_.EA0%>vJ~X앥-!QԡDdezO4 8!ٚQ}aVMfȷ^J{w4g~Js@QΤ./%bЛ{߄ȟ{ӎ$zmuѱTiI+NO~N_y9{^ 珪^o*wv $ͦK t 2&n5$ RҎF)F]: CEGbZChH@! G #>Qp5dp÷o!@ BRNJ=2W\hjwJ} z<޵flɤjONpbVz\-C0!?Fx D_!M1DfDzIܳ~$qa5έ^00} #y8 qFs zZ!;ƀTJI8G^eݱYku_J8F)>;SX"ۢ킋."ڜ*w ѝ ^ʲCHfےel=vZOsc;[Ѡnzopw{$DҨ&iyğ`taLLFH x9DO40Z|VOI>h_öS9b; - '_dݰv4`~P< (=!q/8R1zt`q iIG\+҂V%I%m ?.a8FVˈ$Ej䨖3f،Y<8:h$A줺dزܫ,Z8vuv/N|SܔߍV)mEZUIH ͞s6Ht{Ϟ=uo̬YC=COڑ!)ܼ[NS^$+< Wᆰrْ~X'2RcKm3 "p!!^R0 gvl%8}nv&c0"V*m5Zb! 
Ip%ZfO`8eEu#4Abڧ,u}Mm>]Vsj3H \8'&~W,#1pCusYE]qU>hɒeޞ8DF,e1, śA+/~7Q+И iI2%H/\x$c.a5w /TKw <#9>32c<Gތ} D+ =)I#UVj Xb<#[q('ŭ,LnZm՜'$E ꄷݟZy4qDl?X:1+iJs|9/PIucz#xoU_q8V4giwW/]IӠCȕtl/ThrHx_š0YG+7 1`,7͟#*Mwܡ"wH)5ҏ̝aAT&fw_qrWkm@i AO^/2G[ÒŋEyc˘:#G|<E{ر 2k^r,B#oXt.@)Ճ",#GCUF*C׬|w]>iq6JR[g]?5C[V$07p]סhϹ Ww1OR'O>D9{#|"aw%.̟R0٧\3;^-l>8,T}R/xD 8DbS:ixxw,HZNT"3DF4Q(z5a݆ QG'@ e(=F&p'Z(2y/̉+Fqx&qJ"@ n79` $ ,$l!8eY30fLT7J)~1i:M/{iXk+堻kw\J")|4c%ŋ^~ы$΋GGFQW:-$J><IW:!);4y)$_Ño$G:tLRgq:)rwFC]s󭷪ԟ܋WYdbir.(lظ!Y&d|F^WEI3@8ÜbFg=[ڊ_7I88]'p)$HKW!ǜCp9"Q_*8`wG"6PS"0&7H1_f8Kׯ7y٢ B>+i94w]S]t?~cO9pEUk΃0 |bDa,ba%?%WRٵ[hP~q?99b\`)6fČ<5H@>ůz*+$/_R& ^.D腡;޲R&i d(X=pwzJ(@xG4GHwdqH5uFU<ҠJ`w0p zau Fd'Ц>,UNI~c$eRý/ G}PTǽ}/$pUCfUڸ-u8OhF:86ִǙfǙ=Q#X3#ynq,O03ɴX;gIT׼޶E z a(ÈEI21 _\ $< _7>D"wģw.?=˜y3 I͠%RJ(cEj1'`ɔfkZ,rJʌ {HNHԐ\uGd[a5ڻ+H̨@ RʙUK x{K3ewG+gol25-ښخ}q1B<=CocgjI~hY`y"R|ox3,m=?(ЇlA')"ӦM_rIغu-Qtcè0 1-DO\PKqޏ9a_,ч!+$過'.~%^wS孒02 H" ͜lL䉹 $,务0Y;Q_6.j1Tt_b<={ Bc>5ZܘM JT(DwX+L/Y9A~I:T&>=ۭ̔?0i0ed[=zy~g0ɕ-Hn+8D_իpktH(8HVsL8yKD0FT d ^E@(.w1,UQ XdIΙC^Lh.LK8ՠ,ϝLUH^SY̚YiP%'iS_(wIRL܎2K;,@Padzﰒ&z.w;80 Qas $ب]55>?\un7a)Vp iv0?c~G?UVlb)iAHq42h z$bCsCvlkȨ 1 Hdz֢yb7L,ڥB}4.uetɒiP9V2ԥ-v({w 'Lޏg?Aa!! 3I6')Fk1#RsoVgy0;wNx>zc^KDdf5 AZE>p8xW]m;eW^ sB!ͼOңET@dc3[?Q:#}oɅx`J aa>M!Tk#z὚ +6Fow(ke1IZ`l:?E@YmEru㩋./:mH$&M a06u%L: A.wp7ì8j쬗$x> ̓? C80 E.*@^/2U3LO,CY<đ't~H- oi)s8ḧP=]zbEʚ>$jC$PmGTq ~̵3LSYI_ʍ;shKS6nfhY4 )XzamwMGbVm5d&4\^ nQ*%r%PdB\Y3&pܩ6Śv00%uS]XVv:d㒜ܪߧt!ü' om+BR#xأiڤq &8ڈh,A2Ql HťY?{y=k h /o}փ\0^KXts<@Z?`,3Rc>WW j(NA?EV"cta+Ze2lnp|d*APj%2Arܩp25V#mLj$o%]ZX` 8pMacPDfsG|<"씽ϭ2 DuXᨛhCvNaLDYv?3&tq;]P-hlͱ yxmw$a+d: :c{-w 5 1!ha?!}2A [M}j7hD#Ed)RV!*,P&Xw7"xUù:x'i 3\ ,3 Z3fR){HpgUxsDRp)߆;,Zt؉(.%vK3FW@~y:#:G:q.hFC0x J2J „ IDAT )_;H{pȾ4 Cg,I:m~ FH6 u"}PپBIP2 D\e :zAl6!G I/k%ך<>Oh,K0s\FxPQj+" [ I9 ;;QZ(1*+4UEg٘o>rD {Y"q\H;vlq(W\9_Voא|vQ!Ez1 pJbyޱD.HGc@, L6ئ=.le}Πpm䨻ڛ}kgШ?? 
/Zb@gdH] ʫs%}@0Q$xGLF7 K wޡ3`<[ygoT@M@*I'$oPs`B1 s11_؎:_9+ lSf QRlj ‹ݙC_AX =5\0R9,+;{,DUDxN]щDpYirxC10-@Mu[Ct(FXa#zD9aQfzC6,_ՖOp4 gց^t=>iC\>y޶}%IND6uFde}K[KS`gqMϷpW_v I*x-ᜳSd&eƊ @2خ)O>Zq8e;z#|SLG+\Bxh(4G֣a1g(G >\ùcBem!+Ia? `XtYظy1#Ifƺc:0>o/аPW-X_QPaJ wz:\C8:MEBA^@"̵ШrHuvR]XO;E CevFd_y[tW;]vqQ Ef i,A"&q0Cj"!d3"C] 8Dۿ_͟tKpIE!fxcy-Ñ$Cvޘ ?4H} 4 Qf 9+[%bE"PnEg3:uJ&hGWt|eJA2Sj\&(gh~5 U  )%tFDlbb$56p9 Z:W[NjڟVZ4Wv&;$]M25\wn҂>`mK5mʜ^=/YDGη.%{9C!uڞsq2iOw4$2=p0)􅂸%Hi?duZKڳ'צw W:+z/?t[@ ^Qkd:0S7K{vхZ͎'v7d!jbqF@te 4HjD}1LhR%: KUJwjns _ Zh)rEa`ĴJ|,,`fxg InĶe%fi$4da&63Wt Maًꨛ1G{kނaCپ_9DM&JUz*L7NqaŊᥗ_n.m;YFrx̢, mhl-Z(}iޜwa)Rl@l&itjFׄ1C pSG>0Z1O EC[aTRck#t[A:$-F1'ifw$L` w R! Z/xSaᒄa)RfX O@=4M^$ߍ4S\pQ@:!FԿkяjgz`X׶ƊUJ7Վ B -AH>RaHWfRiȃ4jƎ*_ 9[[ojS7i!{QfckԹhWK$H?ǟ۪clG Of2)#YӯD|G@ a=#)yzEiGbP  Dz3uJpΧ9)y"YBO-˯Fb>E?t&s@te+Id$Β¿+ 3ݹח- / ~Z9pmeX.vY`t0tu|&-0/8 Heڬ=sXU_x`1@4tew&:n$_h  X!@D&/Z0k@G'P)޴ichyeυTE)0PEr< z8bE;$d~&W8rZB@jXzp$[yȋ R/ʾ*;tQ\K8F)U3 7 NQӕFӕILǪ7"~ 8jTFLPgwsf&1a~-?)|XVPfl SIa%,DV:'q'=U1=AJ+O޲$H_@rN V*$qYʁ4/wGNS)@kwpUfFy4hu*CXQnW`.Nut`y@-\tf_5d;dAy%pEq8  0Պ+kaogJ;XA^ /}07>dW24͌IZᥜI5#Xbĸ%2@ |$Cq<<.wHH82 Kp f ɥɡ!sz?8i If ٳU螫U]#A"]5Qvm۵X2dixvGU(ϕu:x$ﭖ*)ti+yo@“O=B҆ m掼}hϠeA2I\Y-bCؖ +R TH ﳥh~Ż ӧG&58H@|-NցDGԚ4F_MGsc:DoAfB] mZ-e?OӓO V(A2?)Рx+:]@ Lm21I\js׮YC4Cnj HojSEqsX>">.NxF]{CZo1,  A;ky8gi9 /o|tthI3@Hɗz޹ɽ#DSξ. Ks<0R+-V$R߮Ϧwyk"=}ŒY/V)s\.DXYN@#O]G)Dg!J1v?#2 /l jdv]rKD|>ܬFL-(w v*4[/epJ6R|{_6|oXwkT4[[lǟqXxqS~s78E=\}d0]e%I *FcoETP,5dAʾ'GI$'~Pι$,_FcL%q#~"cNTgb=GM7Nk&ሑ}Rb _TCPf( +K. 
ښ*OZdF$ӾSlۺ5lCGNPZ64eZ:qᐤ}с!Eת~lAGRm7^13TlI&}} dwJȴ$L5:3NG& GVGU\ )#p&/iwYduvn0Qk 1R8g*X^1V0+C$ ݣkb@F=a-b||cŒ I+@2GDhšW=zv( Qqea]'%Ow=RapB{嗭` H<ʶQh0s{@wݥLᩎ}ա]!:XJ9M'-!bzHH.-FVߪX{7D,fQ.Q FA~'s6:;~I50\x .4-P'DDL$u}8^DTGK<߲IL:@0=(*!/w>\ib,'I=`yi{bd$̕+%$F\!u=w2vCٖ WN~| J9wF9#*P/lg۷wOh\kjKGg7jY ܳυ.\2\ygw¢EΝ;ıw타2Z# 3tJiB)<g7]U;̔H[7kBsNdϜ93'+͟Qaޒ`*FfzL"\̓bB_y8&pz-H+tZD* LmrO(b˳@ +f ["*7P4: RtnOFq Y"_޵j~CI:m2zѠ-{$-(qaqO%vkzI;|Qv$O=~Zi׾W_O?C=TʞzNWD!pdahBIwu7"T{{qO7tI=QJpF780:RH~])?L4E9_X,̉*U I"/X+߾5!}mb K',$A8OR"l=*|O{ /_T28+,q$3L4BA3 IDATΧ05Zw/%\r<ڽWxiq/L7spט1 @9iqd2x0L56na]Ev(zR[ciau@m-_+s^jr0 (|w^Kr<:šaYº;&8L("F5|iH". -* z ɼd`T9S:7s(Mѕ>IB7L<8#!։{M7C]ѹ++!dHcQ'IJIiu 0Zt gfa0DšP!4sLMr4z3V;ח, eeKo~]u }P Kd]/6op;Z`>*˝ڍ*w=iJ$ςHG:uF's`??̞3'KN֠_edi,UV:8ZaB%LW )&@ٳgG "1cN!6./24ĿCĤ35i?n_&Gm<أW dݒ<%-WjDXS;Tdtx.\fV&H ^9FZ[&.e|Rnm@_4 Z^e 4d)@2s5Z62Q`E2&2*a ˡ1) IH,aA0r4*Α(cC8"g,H:?t5566GX:ê5jVs4䝗x0D'5۵}[x񇽢và]:{ƮDI uɓåx|^ (IHmܴJ۶P}Ns|?UMX)(( oS}Q͞5;\-\`boy)$a(+.]mF}kRdцf_oY%|}jқ Y:&$(_3PCC0}V9%vT%bc(qW2Ѐ/tK5 < MHpir =!Q2j eg̚5v jL5ŹڢasO?!ZyFtQ=%pLYqqiJ$ML/ۏn:eqv$[!٧Wtի%aN]QoH;ZG9WB=31E9ړpjd (_oT[*T}3TQ M}{eHyԀ.Z:,[\ <7IZH0(Ȝ$՟47 ;up2eT\ztߪuhԼpdP$yۦ>Y pM(RD =aJ$<0GmI3τ)OHzוW/z/yƏ+pxtl<en6`b WUwiD>UpY. 
,R'h%Zj_ox^1F_4J>)wZ-ƀ=D]s] _B:_dE2=g/szǥ{=0veeϒn>~!Iq0!_l("9j8CHqp@úG?qlAVօ}4lܸ1;Be2z9[帞G9 ʲAWb1Ϧʾߒ~4g㏇ǟxBYZ>õ]tq]NaC$7mem:H7,Q(.J>v,zyR$sS' uuŅEV6^}G#Mh6n;Y'zU0JF5$$M(NJ#$/{t7V=i[tivcx\|.& @Dąމ"L}Eo nIqamڲ61NrND\=jH`jh;)e9sUaXfB]6nX6H`b-Q[#N`HxNhs{&&ؑ,^W\k葇Ȃw8u7hu+Ë jp@Ṗj:@q6m@qst-;O'iNEK.8ϩ k.?`ٲ2KwGm_R?gyVJ;LCظ₾+552\Y$ d"Zڢ|Dxi-},5)[6o <iOT> /{YG4K/3Lׯb=֓Hf4Iz nUFޣEweM QkȄ1b b  Yp`Q<[X ~~|d,Sg!Z?>򈆫˯2+Mk/2fxNNP~f4͔9ׇy01yJɽ;gpD׮Yl@7tz#H[:~ue Tyw Uñ4Ø_$zZ 0`8^t΍7,`8~!-\14RCbՆK٬=' b1QUG6z#WIig :e ]4$zM*:?px駬"sLiFeO=WPj=y`䙟=op$w]qU]Q}?ha]%ɒR[R5c:Jy:$O"%\>z.`k:H顃#5 RI[䢺c!7ChU X|[ ]#yavYʰDb'U2y<`(cȩ]sW5k7}塿~i8AY׉IL͒;4g6u=whg1,J/GE엢3ܓϞ5'lg^qpMp@a{;m A0M#&V2'o)jvtMd1KXnnG'?h9&=SX/vV,?XXlZd :0 8Q+u(]_C2U# ҜsZz%)$UzƀmnV͑"H[8q3ZN@utL],NO(!5P_Оsԣy+ <$zM˥tٲަ6C5ypPe5ք+VX<Y [(Cql.rvOֈQ*M勿꼦|A{#5=gHq(gi?k׵uǏl^5HvWZhPUKr-A1 =xD85 0}ƱXaq!T+O\C4|Ao;t|OUyTҕK.M%@obe ߾ǭ\"LXM\R(q j DxĎ7MH3d= ~Etq!>YtW,9t`K/jo.[󗳵1a:&Y:oqگ>sܰtabP/"4P-4+n l_u\(OE^NӋ3p.wE+!醾,zcht K55[\A8;ڭinIm:mjEm}3_)CKx?3RNɉ$M&Neg)i9gT$Iڐ[c=N~7£ X'X#rݸ.-7%^J]&xbBG.H&ProT.8YtWS-M\Whu\LOznfP>?=.s:dBD\&'@9LIv3*BM<(+䁸<:9OymqkL/ hֈҟ^!&FGʗ;{$$)n2JIr Y- c1g=RRcmMmX/{,s0=G e@E؇=s_8KCNMzn(Wtﴅ#EMzڳ6!~- :uy<-A Y[niOWL Fc^R >T‰4}Jr99Ο#d1cADT@04=@ٻ=䄾=F1 ^Eq;Ja}ɂ!q)m&N HO5E3I_2;"GV;$גV8:6F1癛R0yR2r=yC9hd,rUٙV9`(յu``7OMt#5b}jcRajw5*䃽k7nܡeuXh]Xbu^\1YSSI $Ie[1 ? != $(\E003Cvu# Vj:BqߨDYL;! ڸ?\sML)=kV@<$Hy%Yq'P2YAJ?/w]2'-[>EW6ԊcU0"A8].e@g>SP\wW^~D$#j(+59Cmfc@wNT9,&YL.a`! URy=9 9LCpˀ;^-t\It:R"L A>4ksfhQFSِ3Scprl٥HG w5@ޗFt"?3ԁe>HS6*#HIuɧ nt8ׅlkBՔZFz7cWN=q]tt|$sS8tޟl*>y5-,>o(@flrTFs_y"&gk5+ݨ{KRQIX0OwĴ(OoV#0 F$0kJdHKY#d@ Oq9ѿY<5prZ#4ۢ#Ie4p59UUhjoڼɋ*G|5腺9"euo3LW#twvA6A/4cmB !IWh 3ca$M3nI O~,? 
c/ oy:N)!w7KUszߩv F2 9lo #d[&?a')˳MA׷Pz QTk1*6%1Y&/Gy6kd,{e-f ĝj$ 49:fä6M[:ٶK囬-EqʮCP82y*ȩ#qG, 'GY+ Lg$,<ۗ<ӷ])cpnU(8g?^|9LewG󵿔zH>`sHhpwLѭnŨLEɡٷgD7 $0d4Y+ @f]+0Ž@ Q!9Ew66{ʾ=[%Zi_zvMטaOudIiUĽFIi3[+BW*,?3̈́E#trkp/C&2\ddksKYasݪ(= 屘"i">{dG(e&DwEH84}Heu%!7{,Y~/f86tM):T0VYt$4pp: Aa[7rZN) Fǥ+eY_y=>oٺ%zk }*]Jc=S WYBf2T#H(\E@C䵸8?՞-|٩{}ѡ42FpJ_ 3ĞɹS.fu`X["iRaMƳbt));7 imO`ULKb?6m^{%J|@_j[c֋dh=[p$}Ko( %*YfޒH[FwIP)HxDtƙL sՇ/7D; LLDBbO\|Pb%ag a $r \i 3M1O%p5c]<BUq-ݺ^H!Käb[H'189XC3F@S'HqN76rleY;sUz fJ#vBTy)Noph"$0+<'tFP/24Ć I!ͮ)nݺ%({'$GJ5q6!1jEr^%A7=NEAA 0\rIgU~0dA#)\aH~m v#x`X0GF P׈‹nj Z^f~2J̕G cb.hrTD5hN?utH@o57 A2F"VFjL?q)8G D`9^ E?|{:kr0qw&*̓e2td@GK άW1Xf%3T;6BU픨2( 3Oi;"]1ojfN S^”TYita?%R88 D+_7ӆ+$yW`h=/y"^0%JUʣ@wH52Q`Ii(Zp8"gΧ1=b;FvLĘh(`K=͇kK8n iHgZcgܸah_CwV_.a,H&4#؜/1Z"izLKz Hz&jjB₝:e{N_O?Lhޡ sELL-D nfLB۶mMXJZs\VH LJa:sRCeg9N›gHeXG4Dщj e@*٦XآX}ۮCS?c3C1Ydq;t|Ji=42hźBGll Z勒#a(aѡSQ^_[E$D vhsφIJ'),OW.ZD(M[66֠1QZ(c>]^LAݤM~tfÄ lC =Ӹ0ac" DxDs xp&Z1+5u5-lT~2zb2Yz1\:y _1s~3-Ny? /!*ۍP%j*RzSP4 W,iy#T;a>Vg#Oj%LYtuT_5\TR9|%dYy%pɑ=o9=RW: yϟ=Γ^6>`-ˡ1AYyAx3ǮWG}>,<4L!8hcaT)CEd.JSNbC](3u4FGr`[QG1Rpp 0;/=tHa R! ZI2 !&&Šgׁ/]Fljz> Q**x[EMdP=mVФ:ʖfӡ* aZN<^5/^o*8ҹIGA0\amr&'HH% ,5m4A%Wi9['_ghT&9cƏ Bމ @Va(,9a)x8a嚵>)ouC's&N ~V_@ dc 0Jfs5^5 S K3iZArL]E][Ш>7*l΃š埇;x]+[1EzSr])B]+p?xĠM%p& *jåNSdž*阶>Ε-cG\,FT,S~Da޵HvԓO#bĂqɒfئm(GōlK'Dc2y+9N6Hn#Co4?9MЋE0*V PXaRfŽ静Rb߮c\G[orr`p0$İ~ʲ-0 bo{C%-IGuˌH}uEEuxY+;eZIbsTջCua!cCs8$DD;>+# JN'#-aVjjES0T#S ZsI߲#u*v+6vK8T'TBd+P#OClSS2Qe$skEEOJ[emFTh ,)V3݀I*h3kCC}ysEur3kBq">ޘ  ,aH -F{:HI~CMl'qHgΚ-%a5sb~#(B)_g1 aFi~TIۜ ;ZJ6 HwRVR8'9֊jvҪC>ŲSTa/P*YY-̳M|L*Q5])nS=Tf< 5RTGY)wjXʢҝ {4w2\-xˬ;V\tmH{:-=7e: j ?J؊ܭCS_0axԷT&?Hؔ6e"W!I״Sq{Z}yORFjQm3诃B2"}Q|A&,WKce~vl ?TRKJ\s I$'Q.R;':VkRbJxIsg -MtEax.na<^ k`?/jtCן+RXao:J+&p .Fies|P!;>ܲYCSaJҡ.OX)Ʃ uhZa%G@Zx>XMx/;ΑzHPa_Hè`vNܧ͇ $G)u#YgϚ.)wb&Թ wHfV9⽏kw(l%ϤqꓺiohY^&Xg+IaU=UQ? l!hһɊeP#93' i&zMTo[-`Iu#.{s? $"h!-JTqF܋㝿q$5>,]$lBL&%@ oQ=gvE*0_X?y?UƩ{:?kppjѳO?c7nA@A1Sap06g3RYA6WkE%49E2܈܏ngoeqћXF. z pOțϳȬY$R>'KhMpZ! 
NbQRz]- 3/X> K7dj%N9n!`_z2 8qHݻ8 gC,sB>q];ث)I:!jMOҒȿe\Ϊ0T҉8Ey!8:(1cyQ;޼qX,_E~g &M߻g ul8⠈GB|-)uW/R[U❋وյh_=qssU. zXحtX HOc1bv2>Gv sfٯ.鳀7DWUՍ^۝~QGJB&?y|jM79SO>맫Pm5-vt$ks4[|vxeE Lq8`< C$h@U1R/[߶{G9 xVWORi+/}Hn9gGI(,Y У ]lj= LZYxB);@z*oJGRF=u촽*wMgS57ySva;a\ ױlWx}*S-Dj qȮ'-/ʅS^۸h#eY8>m<ӾUku''r_x@7Jsݓ'OqNMsQ\ IM -B|┯{Ǚ/bY^ Cy /bm$}U;^h34m șSbތ$Hll PgNuTXk/}w~?7L3A^:XzpUy$H.]fjuPxG9cA]@# Lj~k[LI?R82N(GE+Z!MsWa?0/ck9L 4<ɓ~FUvy>̈*lӁkik  IDAT'sIY&I ޫX]Cp%'dYaHN:0ze8ISm]uT8 @@7bATH1C.wR-'ϙ(F0b뇿7etTa2yWk]x{$Mm"Xz:K*-zighmvcS~fG!@= ;e$f8io:1Avd;DuP PFh P8~L?ϯz-[A?ɪ?/m;MsH3N/|@UXPB<NvǬQ!]N?"\zEq$伬Psql l@OCGw-VV0o F& DYke0UZ`L76Aʚߏ\I$H1b3F=ٷ|Xϋ? S{O& LeǂO?~wo7mQlu=]t 7jNitM{ aӛ < =~@| 3Njx īޏBi[P䈋̗1ZFY}6,.KM顇ķb~XO/":zaBiC٘!<?/mb2-1>03T.] F7-%\ ڤiv ,eOjpqS=ʆ'}߲7I|Ÿa06Wc*+W^LdygYqL7,Hz!Dt k?kɢ&}Ӫ2B*CM!=< Ã|_er8y*6GЌ7m˖/P2X:8R`Vhil}z܆dY߯9y왧 NCɬzwOݲ?.m_=$ɟkkYsE֫ P,\ߪK:^ Qm(_ s/  cQ,<1]<2|( ̏d_ʗmݺuVYk105M#@n銕n>*-oT%M 41Oܭ<]e7 =27tK ;:+k_ъ/9PTCE/_ָڵ6oH@xf,Wشi3 C驳gl"WFIAU2 6xB0 s (q([*pRJld2u˫ A@gAs~ vyI$OG(a+;Fz?G]fE"Nb1r&L)A^%!#>2~//P0Wҏ,)\픒fƍc`!ߐxZp4"ď@@7rHz @ kC&x9z k뮹td@9 ѭt#6{172Ԉc\vKnfa;h:Ycoy->n*6C$bntK." ٲr-p]6l+r"j7Ξ5 }w6>2H1@c Îvb*/DQ~y"sdRbF c!^crDc|%[v6hs6\FqxBO`"G6p|taHAF`DX>m}N@FwYjEFnj3̰Cxy^3*9u.\z  Հ;'=#@\@A<'|3Uڎ׬UcI%GE.k%vѠ\slaQLR8sy쑼~2/Ǔ `2{ǒƕSr;@D=xxr&l ܫ鉃׮7|kl㵲O{|c~]kɼ\oC| ['208RzG?@_#q6Luƀ0٬:UpU?dҖmzS)?P@ҡH:mߦA+!L|o6vlcľbMZق-g.AU$cF)?(h~٬FwD{%>޿@-MM[d-[ܗҿRH@^wfuÇiۮλ[N, vʎ&#I+r%i3[TlV/D'/+,(>[T3)ex9C1uD8XLru@Rm~Kd~WhG |o֞NVx7D8r:if[qs*۟2u-]Ė)#H]{,Obh+#4FɦlvSQo{qn{?`/dQ~.d=ϣ6p ׸Fŧ?){饗m:,p13wͣSqsf ^@k;95 Qp08t;B 6*|Gd;a @gπe쥋8Ԇdx'*g>V,9DI H'&QVGFp8ʹsOdxosp[! 
ÿ~ 9H8cE<0Uy6CT/qNrVg8_@7OOK伻=G@($0g$%nr+|b܌~;m8{ӱc'I7 xM?aoɓTC&v7Y]\9Sc<^Ư_RŖB ħpaH Ynqt`L0р-J8a#%~ϝ;ߞ{} #{at0 r6|9NnHo( S5jLJf[&'w2?y7’Rba:5kHIC<dScڵ ek[$x) i L?Q_| '>DA)$#xA9{ɯ V@aFe^~-{R|VFefh(.1xY}1:[@W>Eϱ<9᥽3jH4"Fklj۶3ND|x̵ڻמE[=؏9ptՑs#NﴱEK6Yy,HgJJ9 !jywnjeY?Ok&eGeЀbHnw1ARi5Am!W_Y>ao|ѯj [lZlh veYsP <{md.2e^̨uL Qq(ihrE^qx@֕V@PˠRFyxD 4uVZP!s1pnf˃T c[O}Ȝ&,Ss@%.MK#qSy1ʅu$1_Gz۹iy6ۺu(O=nW_xZIѾ!%3F[y|J4C{<S.stEѻFc|Y5@2dr` rZ@1 .!ef4p npm+rY j0 *ďxfKt圗PEf7J^_q .j7ߨ#%0%< 3}5*vQ1yr Sgp9--$vkC[{Hq1)P9k?z(?+Ivp(Ja̓bl0B<$/Nה O2E`U*/Nw8ب:cBc|6~4R&-efcݘ%K_llOo{*ҡ22 9A /j-/ăO؋gb&hW\N*.\ ܵ&b{B/8R\o$bw1A|ʮw+Ta%&MJs簫ltnpz'rpjqhwu[Ul۶ժV{peevUL73+) mG{#ƍ(:!Gډǝ|L{Μ:m{d3列{3@/@S^ =JisJԮo#5 P PQ$AǺ_-]۟J|[`8!'! "1}uu 4Nvrp^A>p28|bgcw7mf;"U?-`+c Щ[(v]m=bFFˬvE {̿lH@ʶ4omP7w,ɒ. $6b2h*MT+D;?_րSHx:v.yH)M޲J6@b3 y 7j&۩"M_4j<{!WgoUhh̝cӴf-NS2:%l]=}ݧDr|l*>+ԻpB"klmq9. @8LXG=Qua?(q{oتկ"(Ba,rtGWu<+g x,GHcB`Dq"DW|re:$ mAR,k@+Qk=D^iJ%2%\s̮={T*ZGα&Mt8Y}qMFm[;V.=^A, qs.|;"zeqxh٠ /f7(_"\x-irN)NIU"cc X2'A3͛6i/]ʏN\\i0@α .q~ljҪߩw =d Jr]G1q멪/jZ'CHh/i#*sL 9tFaÈ|QF%o8OHb>tp5!]`U'i)漢~mngnjr.=/_ܫ}9ɾRˁDAW&iOr6 z\ Gx-7w5X|qk"@@C4~=CyH0 / b _aMuv.DQFh 6gZ(G5Gɦb{cqgsƞ={n}Uj1skayh3M-\폢.28fƕZs 4jy7?KjI}v7 +mn'e:AKfG?{INТ> cޏL pq)8ɋK $eF96I_wP IDATh^ŁSX^%d;g5o1c2E~VhzEߙo}bE >HJc{jK_bCJ?d_OWq32i欰7_QjS1+pjUti͛Ƙ467ʼP!q $,3ќV'qx䨑r:^9{{F[3)RЌ\dƴ:XF@}$bs;JL@4Wƞׯ}RH˧RNbYK$be^06 o93=1b1, 4$Wᙩ(#4-d>_:tH yh_A1fq9ZhU{ߐ΂LySylN8~l.ʞ{Vyf)$[L$j"qҡ1{ő3grweyQ+|>O9J R Nkp<_gNuM}蒥`6ǎ\),qsk?>(S<Ìb9O҉c+ 1rJ 5MBYAjW27# (33%Rc>4M8wJƜs u<&| $/wQ%Cqgi3Y>wsmXbB~0$wkn& Yq:GqgexҞ<06xܹoR&EMbQbz}*iN5O_R#՞@<5sր3* 3CؒQa?:]XO},1&&׾7yH.8:JcO I"9Mmix|kg] p3x o(_;曛K)۲ȉCD<".ƵMFvm۱~;S2"`mN wmUʡZ*y\.xɋ^~zGYacb񢅋˛f} +ɼ0R>I8ה4OW?Q#KM݉SxsP K9 ZO'O#ؤnLx~G)v-{n8hFUy͘Q/p!DpIz zg>cq _ 1纃y1M?g짩Qhq)ªl7TϓqӦMu|r'k~xP>?{T%'TH2*0Q jgX{|-7< uܧ}b0KJ&;jǽqҀeqP`hsmIy2 v*ppR6sg*TW(@rX~F[|P'rR9=i'=qrQ;믻^{ߵm4jM&Drb+Yg&G3gEt@E i۴F~nK+3|GtS}@S 6ͷ|@=uJysLkoymٶELsypq,G.(N"AF|8a/m4,=w{c\z(@2{!nNLmoys<*eX}@"wP0u0#P14*H6$Dl*khmop9"&0X:I3HJ\fgDQ~ǻd7<&,1Wlds}]yț%K]Ͻ}|dԤɄ2 E@R 
ףF2[([gVFk9ub"H7oC-1}C O-)k͛J)@"C:IU HGfi|.]"mp8̚8 ұEp "VԴ[GaSiH5";p 5pi-(B)HR$]A5dpS#iȹB Q sB38G\ ۵6;sCfy)S\b:GdyC SlRu:& @G"A\9SbLWaUAt\y5 nDU/MӠ{/X93Cc[z}Cr1NJ&BsS ĥ(@2^) %|w)Wcy>y8d?J @v-v뭷~oo+PN$0(PLN!Q`@@L\b5k۶v:mk= ގ*}i Ѱa@[n@7ǧD(@'ʤQ[{][jmڻ{0 q0A&IAα׬;~oށ?KҔ8@.=N:!!mF@vΏ?a^hň#Y]!ͺhgæFq")ۈR92_ $/Rnxν5U/h[lv#GQqΩSH-eoZ4:T'[a1Fx X?Z̆{׻YS]9bb$u:&  $K,"0ƛ\Qcy֭[lͶl>~EquIKӕ= ]w<r̘6AwTm+% K}tL/HR)y(" a) pNNƖn#2*IuuP.(]r}KTJw(Vtޒ j:FA>,;hZMVw*(Q`(~f3O@ pѪ;9 =j8QH:rVfm:Vfm[[}nQRIY3&y.9rY@%(&g*ATG7 N3g9NMcdZ#G߰V(Y;m RY;SH$HVک.@z#vwV@7;#CǣOS 䠓8UPJ n8-w1tErH޺W|;5/Y(Pi $4S}Rz /FT ̤"AM?A~ݤ5s/D R d +gd7]#Fs&6(:e_|ҹKW,_ϐ~*@ rrlQ%j/êXo-@ҥ ơ "tr vbPN@)r۩b4DH我Njk뜜.2׸b8ew@ 5Z}ⰵUGB@sJ$EHʏ?hE bpKlmTCW̫dCԖW \% Rsx+ospl5B;h7@5l$ssG}\e\-vTWuL-IvCkīXrd"ǁNs}O'q07$fm؁{EmDNm@p^S=yI(6.)N>N~F/ {{:GAe3j#N?C $̫؃~O\C% wV XJMwPZ۞3^+ip@P$]r'r+i{L$o'"1N'">p~7Pnf@7[}4+Vى/ yLpZA|pl}=! ]I"8;K]-}nRiˎ>|+>h1y+b9mD"Hi2 tb._;a]h(.QDG\ Z;loRHEΑsDit5@T-vTJPZ8a>1Z~A3tvA(2VܬR3>ݿvJ54$H@Ii )yͪUK}xFgm8-wS\\&!Wܱ^#lPzu1)=M3NPs88EŋdҽW"2Ɋ}7VX]5k6Vr"mrGٞ~َ>ݹiq5|T@P rm/8HՊy6Cѷ׸t%l >v8(tss3G_f .:fXaI$kշda>79pRvWP:]~{vٱd@xԍ >hu#^WH 9@OJ][;' .cN$3܎.:lCzDd h)S7pf;-GPPu/jT*p]n`OpS+eUcFٰsk?q:myy3Fk liI N#GCU=[g[Oɹ#ZN G# t:xW ÜgZ[s\/SNEsY:.\2~kV5Q7Tť0(CӣFjk:%"ژYˎm?:'8TMt,9^~`HHVuUpG6sæ_wa%+dW~FaMR6DuzhY$SumMlYʹF8RFhSHS qyjsj](:WPh۽3Ose KsKIG^8?6sq<*Z6JvxyTSc(>\\Z݃%P63h#W\琧#=RH(Ce2ԣgv85U_\NdS t6=o2+1GLqyqkwO٩WwɦTVB A@[*̅t,cոZ{CM`nE6zIePRkó()@2R"QyI:8,>k?!-IDAT%?έ?+y `S:N H(ImĜi6&.s]Mf?5V%PO8gGݰ:ϝ֣Z9J5WиvU6zNQ7K ąbPn;𜒸io 9 $# 08U9>pEN Dn2Jɝ-^$9|BlJ\T]^G^geУ ʶ,>˧j"p^u~ ioX gRJ3ysO!(DR leõu8A9ȁ^2ܖ$N&,VV8RH]Y4:uzb"<ˁG`!:k}G~^/7Y-6YȖN0=|!=zw $Ai3Bk G͖vO-J)N0E+[2XD.H 2p|W28N[,Q# RuV&U p9W8&E^O!Q,!H P! dӋiIA^'En1J"@q\|x98%̹Ev:'$B@@Ht,P0 J^Ep8lc7`>l X@ ١yL4.blLxzJ/[r9B"?!\0B@@z@8V@@@z@^n% $ $ $L} Q Q Q eIENDB`uqfoundation-pyina-c629452/docs/source/pyina.rst000066400000000000000000000014011467660040300217300ustar00rootroot00000000000000pyina module documentation ========================== ez_map module ------------- .. 
automodule:: pyina.ez_map .. :exclude-members: + launchers module ---------------- .. automodule:: pyina.launchers .. :exclude-members: + mappers module -------------- .. automodule:: pyina.mappers .. :exclude-members: + mpi module ---------- .. automodule:: pyina.mpi .. :exclude-members: + mpi_pool module --------------- .. automodule:: pyina.mpi_pool .. :exclude-members: + mpi_scatter module ------------------ .. automodule:: pyina.mpi_scatter .. :exclude-members: + schedulers module ----------------- .. automodule:: pyina.schedulers .. :exclude-members: + tools module ------------ .. automodule:: pyina.tools :exclude-members: +ceil, np, wait_for uqfoundation-pyina-c629452/docs/source/requirements.txt000066400000000000000000000000261467660040300233440ustar00rootroot00000000000000numpy pox dill pathos uqfoundation-pyina-c629452/docs/source/scripts.rst000066400000000000000000000005441467660040300223060ustar00rootroot00000000000000pyina scripts documentation =========================== ezpool script ------------- .. automodule:: _ezpool .. :exclude-members: + ezscatter script ---------------- .. automodule:: _ezscatter .. :exclude-members: + mpi_world script ---------------- .. automodule:: _mpi_world :exclude-members: +launch, alias, set_master, set_workers, kill_all uqfoundation-pyina-c629452/examples/000077500000000000000000000000001467660040300174505ustar00rootroot00000000000000uqfoundation-pyina-c629452/examples/hello_mpi4py.py000077500000000000000000000040511467660040300224320ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE # # original source code modifed from mpi4py-1.3.1/demo/helloworld.py # helloworld.py: Copyright (c) 2013, Lisandro Dalcin. # helloworld.py: All rights reserved. 
""" # Parallel Hello World, with mpi4py # To run: alias mpython='mpiexec -np [#nodes] `which python`' mpython hello_mpi4py.py """ from mpi4py import MPI import sys size = MPI.COMM_WORLD.Get_size() rank = MPI.COMM_WORLD.Get_rank() name = MPI.Get_processor_name() sys.stdout.write( "Hello, World! I am process %d of %d on %s.\n" % (rank, size, name)) """ Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ # EOF uqfoundation-pyina-c629452/examples/hello_pyina.py000077500000000000000000000013751467660040300223360ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE __doc__ = """ # get pyina to say 'hello' # To run: alias mpython='mpiexec -np [#nodes] `which python`' mpython hello.py """ class HelloApp(object): """ Get pyina to say hello """ def __call__(self, *args, **kwargs): from pyina import mpi print("hello from mpi.world.rank --> %s " % mpi.world.rank) return if __name__ == "__main__": app = HelloApp() app() # End of file uqfoundation-pyina-c629452/examples/machines_raw.py000077500000000000000000000016031467660040300224650ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE __doc__ = """ # print rank - hostname info # To run: alias mpython='mpiexec -np [#nodes] `which python`' mpython machines_raw.py """ def host(id): import socket return "Rank: %d -- %s" % (id, socket.gethostname()) if __name__ == '__main__': try: from pyina.mpi_scatter import parallel_map import pyina world = pyina.mpi.world hostnames = parallel_map(host, range(world.size)) if world.rank == 0: print('\n'.join(hostnames)) except: print(__doc__) # end of file uqfoundation-pyina-c629452/examples/mpd_trace.py000077500000000000000000000011671467660040300217700ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE """ run some basic tests of the MPI installation """ import subprocess command = 'mpiexec -info' print("\nlaunch: %s" % command) subprocess.call(command, shell=True) command = 'mpiexec -n 4 hostname' print("\nlaunch: %s" % command) subprocess.call(command, shell=True) # End of file uqfoundation-pyina-c629452/examples/mpi_bcast.py000077500000000000000000000072561467660040300220000ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE doc = """ A basic demonstration of low-level MPI communication. To launch: mpiexec -np 4 `which python` mpi_bcast.py """ import mystic import logging from time import sleep from numpy import array logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S') class SimpleApp(object): def __call__(self): from pyina import mpi, ensure_mpi import random from mystic.models import mogi; forward_mogi = mogi.evaluate ensure_mpi(size=2, doc=doc) world = mpi.world size = world.size stat = mpi.MPI.Status logging.info("I am rank %d of %d" % (world.rank, size)) master = 0 EXITTAG = 999 if world.rank == master: # let's say I have a set of NJOBS points to test NJOBS = 100 assert (EXITTAG > NJOBS) # all the "evaluation points" are the same, so broadcast to workers eval_at = array([[1,2],[2,3]]) recv = world.bcast(eval_at, master) # the universe of params params = [[random.random() for _ in range(4)] for _ in range(NJOBS)] # now farm out to the workers numsent = 0 for worker in range(1, size): logging.info("MASTER : First Send: Sending job %d to worker %d" % (numsent, worker)) 
world.send(params[numsent], worker, numsent) numsent = numsent+1 # start receiving for i in range(NJOBS): logging.info("MASTER : Top of loop") status = stat() message = world.recv(status=status) sender = status.source anstag = status.tag logging.info("MASTER : Received job %d from worker %d" % (anstag, sender)) #sleep(3) if (numsent < NJOBS): # send next job logging.info("MASTER : Sending job %d to worker %d" % (numsent, sender)) world.send(params[numsent], sender, numsent) numsent = numsent + 1 else: # done logging.info("MASTER : Sending DONE signal to worker %d" % (sender)) world.send("", sender, EXITTAG) else: eval_at = world.bcast("", master) #logging.info("Rank %d has message %s" % (world.rank, eval_at)) for iter in range(99999): # receive job logging.info(" WORKER %d, iteration %d." % (world.rank, iter)) status = stat() param = world.recv(source=master, status=status) tag = status.tag if tag == EXITTAG: logging.info(" WORKER %d: is done." % world.rank) return logging.info(" WORKER %d receiving job %d ... running" % (world.rank, tag)) res = forward_mogi(param, eval_at) # send result back to master logging.info(" WORKER %d done running job %d, send results back to master" % (world.rank, tag)) #sleep(2) world.send(res, master, tag) return # main if __name__ == "__main__": app = SimpleApp() app() # End of file uqfoundation-pyina-c629452/examples/mpi_comm.py000077500000000000000000000024421467660040300216270ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE """ # simple test of mpi communication # To run: mpiexec -np 4 `which python` test_comm.py """ import logging logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s', datefmt='%a, %d %b %Y %H:%M:%S') class SimpleApp(object): def __call__(self): from pyina import mpi world = mpi.world logging.info("I am rank %d of %d" % (world.rank, world.size)) if world.rank == 0: for peer in range(1, world.size): message = world.recv(tag=17) print("node %d of %d: received {%s}" % (world.rank, world.size, message)) else: s = "My message is this: I am node %d" % world.rank logging.debug("%s" % s) #XXX: set up a port with mpi4py? world.send("%s" % s, dest=0, tag=17) return # main if __name__ == "__main__": app = SimpleApp() app() # End of file uqfoundation-pyina-c629452/examples/mpi_simple.py000077500000000000000000000022361467660040300221660ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE """ # Testing pyina.mpi.world.recv # To run: mpiexec -np 4 `which python` mpi_simple.py of python mpi_simple.py """ import logging import pyina class SimpleApp(object): def __call__(self): from pyina import mpi stat = mpi.MPI.Status world = mpi.world logging.info("I am rank %d of %d" % (world.rank, world.size)) if world.rank == 0: for peer in range(1, world.size): status = stat() message = world.recv(tag=17) print("node %d of %d: received {%s}" % (world.rank, world.size, message)) else: s = "My message is this: I am node %d" % world.rank logging.debug("%s" % s) world.send(s, 0, 17) return if __name__ == "__main__": app = SimpleApp() app() # End of file uqfoundation-pyina-c629452/examples/mpi_simple2.py000077500000000000000000000020011467660040300222360ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE """ # Testing pyina.mpi.world.bcast # To run: mpiexec -np 4 `which python` mpi_simple2.py python mpi_simple2.py """ import logging class SimpleApp(object): def __call__(self): from pyina import mpi world = mpi.world logging.info("I am rank %d of %d" % (world.rank, world.size)) root = 0 if world.rank == root: str = "hello world" nn = world.bcast(str, root) print("Master has: %s " % nn) else: nn = world.bcast("", root) print("Worker (%d) has: %s " % (world.rank, nn)) return if __name__ == "__main__": app = SimpleApp() app() # End of file uqfoundation-pyina-c629452/examples/nodes.py000077500000000000000000000011031467660040300211300ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE __doc__ = """ # get all nodes to report # To run: alias mpython='mpiexec -np [#nodes] `which python`' mpython nodes.py """ from pyina import mpi world = mpi.world print("Node (%d) of %d " % (world.rank, world.size)) # End of file uqfoundation-pyina-c629452/examples/pypi.py000077500000000000000000000025351467660040300210130ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE __doc__ = """ # The standard MPI example, computes the integral # # Integrate[4/(1+x^2),{x,0,1}] # # numerically, and in parallel. 
# To run: python pypi.py # A few warnings: # - Evaluating this integral is a horrible way to get the value of Pi # - Uniform sampling (or the trapezoidal rule, as implemented here) is # a horrible way to get the value of the integral # # For speed, use scipy instead, which provides the bindings to quadpack. import scipy.integrate scipy.integrate.quad(lambda x: 4.0/(1+x*x), 0, 1) """ from numpy import arange # default # of rectangles n = 20000 integration_points = (arange(1,n+1)-0.5)/n def f(x): return 4.0/(1.0+x*x) #from pyina.launchers import MpiScatter as Mpi from pyina.launchers import MpiPool as Mpi if __name__ == '__main__': work = Mpi(2) out = work.map(f, integration_points) from pyina import mpi if mpi.world.rank == 0: print("approxmiate pi : ", sum(out)/n) print("calculated on %d nodes " % work.nodes) # end of file uqfoundation-pyina-c629452/examples/pypi_pmap.py000077500000000000000000000026071467660040300220300ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE __doc__ = """ # The standard MPI example, computes the integral # # Integrate[4/(1+x^2),{x,0,1}] # # numerically, and in parallel. # To run: alias mpython='mpiexec -np [#nodes] `which python`' mpython pypi_pmap.py # A few warnings: # - Evaluating this integral is a horrible way to get the value of Pi # - Uniform sampling (or the trapezoidal rule, as implemented here) is # a horrible way to get the value of the integral # # For speed, use scipy instead, which provides the bindings to quadpack. 
import scipy.integrate scipy.integrate.quad(lambda x: 4.0/(1+x*x), 0, 1) """ from numpy import arange # default # of rectangles n = 20000 integration_points = (arange(1,n+1)-0.5)/n def f(x): return 4.0/(1.0+x*x) #from pyina.mpi_scatter import parallel_map from pyina.mpi_pool import parallel_map if __name__ == '__main__': out = parallel_map(f, integration_points) from pyina import mpi if mpi.world.rank == 0: print("approxmiate pi : ", sum(out)/n) print("calculated on %d nodes " % mpi.world.size) # end of file uqfoundation-pyina-c629452/examples/test_ezmap.py000077500000000000000000000027171467660040300222070ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE from pyina.launchers import MpiScatter, MpiPool def host(id): import socket return "Rank: %d -- %s" % (id, socket.gethostname()) print("Evaluate 10 items on 3 nodes using a worker pool:") pool = MpiPool(3) res1 = pool.map(host, range(10)) print(pool) print('\n'.join(res1)) print('') print("Evaluate 10 items on 3 nodes using scatter-gather:") scat = MpiScatter(3) res2 = scat.map(host, range(10)) print(scat) print('\n'.join(res2)) print('') print("Evaluate 5 items on 2 nodes using a worker pool:") pool.nodes = 2 res3 = pool.map(host, range(5)) print(pool) print('\n'.join(res3)) print('') print("Evaluate 5 items on 2 nodes using scatter-gather:") scat.nodes = 2 res4 = scat.map(host, range(5)) print(scat) print('\n'.join(res4)) print('') #NOTE: bug? does worker pool perform correctly when nnodes > range ??? 
print("Evaluate 5 items on 10 nodes using worker pool:") pool.nodes = 10 res5 = pool.map(host, range(5)) print(pool) print('\n'.join(res5)) print('') print("Evaluate 5 items on 10 nodes using scatter-gather:") scat.nodes = 10 res6 = scat.map(host, range(5)) print(scat) print('\n'.join(res6)) # end of file uqfoundation-pyina-c629452/examples/test_ezmap1.py000077500000000000000000000015111467660040300222570ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE from pyina.launchers import MpiScatter def host(id): import socket return "Rank: %d -- %s" % (id, socket.gethostname()) print("Evaluate 10 items on 1 node (w/ 1 ppn) using scatter-gather:") scat = MpiScatter('1:ppn=1') res1 = scat.map(host, range(10)) print(scat) print('\n'.join(res1)) print('') print("Evaluate 10 items on 1 node (w/ 2 ppn) using scatter-gather:") scat.nodes = '1:ppn=2' res2 = scat.map(host, range(10)) print(scat) print('\n'.join(res2)) print('') # end of file uqfoundation-pyina-c629452/examples/test_ezmap2.py000077500000000000000000000021251467660040300222620ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE from pyina.launchers import MpiScatter, MpiPool def play(Q): id, l = Q import numpy return "3 x %d = %d" % (id, numpy.sum(l)) def play2(id,l): import numpy return "3 x %d = %d" % (id, numpy.sum(l)) args = [ (i, range(3)*i) for i in range(5) ] arg1 = [ i for i in range(5) ] arg2 = [ range(3)*i for i in range(5) ] print("Using 12 nodes and a worker pool...") print('Evaluate a function that expects a n-tuple argument "map(f,args)"') pool = MpiPool(12) res1 = pool.map(play, args) #res1 = map(play, args) print(pool) print('\n'.join(res1)) print('') print('Evaluate a function that expects n arguments "map(f,arg1,arg2)"') res2 = pool.map(play2, arg1, arg2) #res2 = map(play2, arg1, arg2) print(pool) print('\n'.join(res2)) # end of file uqfoundation-pyina-c629452/examples/test_ezmap3.py000077500000000000000000000014741467660040300222710ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE from pyina.launchers import Mpi def host(id): import socket return "Rank: %d -- %s" % (id, socket.gethostname()) print("Explicitly using the MPI launcher, we will execute...") pool = Mpi(4) print("10 items on 4 nodes using a worker pool:") res1 = pool.map(host, range(10)) print(pool) print('\n'.join(res1)) print('') print("10 items on 4 nodes using scatter-gather:") pool.scatter = True res2 = pool.map(host, range(10)) print(pool) print('\n'.join(res2)) # end of file uqfoundation-pyina-c629452/examples/test_ezmap4.py000077500000000000000000000015371467660040300222720ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE from pyina.launchers import Mpi #XXX: should not have to define "func" within mapped function #from mystic.models import rosen as func def host(coeffs): from mystic.models import rosen as func return "rosen%s = %s" % (coeffs, func(coeffs)) print("Evaluate an imported function (the rosenbrock function)...") print("For 10 items on 4 nodes, using the default mapping strategy") params = [(i,i,i) for i in range(10)] pool = Mpi(4) res = pool.map(host, params) print(pool) print('\n'.join(res)) # end of file uqfoundation-pyina-c629452/examples/test_ezmap5.py000077500000000000000000000021571467660040300222720ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE from pyina.launchers import Mpi #XXX:: can fail with NameError: global name 'func' is not defined #XXX:: can fail with RuntimeError: maximum recursion depth exceeded #from mystic.models.poly import chebyshev8cost as func def host(coeffs): from mystic.models.poly import chebyshev8cost as func return "Chebyshev%s = %s" % (coeffs, func(coeffs)) params = [(i,0,-2*i,0,4*i,0,-2*i,0,i) for i in range(10)] pool = Mpi() print("Evaluate the 8th order Chebyshev polynomial...") print("Using 'dill' for 10 combinations over 4 nodes") pool.nodes = 4 res1 = pool.map(host, params) print(pool) print('\n'.join(res1)) print('') print("Using 'dill.source' for 10 combinations over 4 nodes") pool.source = True res2 = pool.map(host, params) print(pool) print('\n'.join(res2)) # end of file uqfoundation-pyina-c629452/examples/test_ezmap6.py000077500000000000000000000016161467660040300222720ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE from pyina.launchers import SerialMapper from pyina.schedulers import Torque from pyina.mpi import _save, _debug #_debug(True) #_save(True) def host(id): import socket return "Rank: %d -- %s" % (id, socket.gethostname()) print("Submit a non-parallel job to torque in the 'productionQ' queue...") print("Using 5 items over 1 nodes and the default mapping strategy") torque = Torque(queue='productionQ', timelimit='20:00:00', workdir='.') pool = SerialMapper(scheduler=torque) res = pool.map(host, range(5)) print(pool) print('\n'.join(res)) # end of file uqfoundation-pyina-c629452/examples/test_ezmap7.py000077500000000000000000000016521467660040300222730ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE from pyina.launchers import Mpi from pyina.schedulers import Torque from pyina.mpi import _save, _debug #_debug(True) #_save(True) def host(id): import socket return "Rank: %d -- %s" % (id, socket.gethostname()) print("Submit an mpi job to torque in the 'productionQ' queue...") print("Using 15 items over 5 nodes and the scatter-gather strategy") torque = Torque('5:ppn=2', queue='productionQ', timelimit='20:00:00', workdir='.') pool = Mpi(scheduler=torque, scatter=True) res = pool.map(host, range(15)) print(pool) print('\n'.join(res)) print("hello from master") # end of file uqfoundation-pyina-c629452/examples/test_ezmap8.py000077500000000000000000000015561467660040300222770ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. 
# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE from pyina.launchers import TorqueMpiPool as Launcher from pyina.mpi import _save, _debug #_debug(True) #_save(True) def host(id): import socket return "Rank: %d -- %s" % (id, socket.gethostname()) print("Submit an mpi job to torque in the 'productionQ' queue...") print("Using 15 items over 5 nodes and the worker pool strategy") pool = Launcher('5:ppn=2', queue='productionQ', timelimit='20:00:00', workdir='.') res = pool.map(host, range(15)) print(pool) print('\n'.join(res)) print("hello from master") # end of file uqfoundation-pyina-c629452/examples/test_launch.py000066400000000000000000000042601467660040300223350ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE from pyina.mpi import defaults from pyina.launchers import SerialMapper, Mpi, TorqueMpi from pyina.launchers import all_launches from pyina.schedulers import Torque def test_launches(): # default launch commands for all launchers print("***** defaults for all launchers *****") print(all_launches()) print("**************************************", "\n") def test_launcher(): # configured launch commands for selected launchers serial = SerialMapper() print("non-python serial launch:", serial) settings = {'python':'', 'program':"hostname"} print(serial._launcher(settings), "\n") print("serial python launch:", serial) defaults['program'] = "tools.py" defaults['progargs'] = "12345" print(serial._launcher(defaults), "\n") qsub = Torque() serial.scheduler = qsub print("scheduled serial launch:", serial) settings = {'program':"tools.py", 'progargs':'12345'} print(serial._launcher(settings), "\n") mpi = Mpi() print("non-scheduled parallel launch:", mpi) print(mpi._launcher(settings), "\n") qsub.nodes = '4:ppn=2' mpi.nodes = mpi.njobs(qsub.nodes) print("scheduled parallel launch:", mpi, "| Torque") print(qsub._submit(mpi._launcher(settings)), "\n") mpi.scheduler = qsub print("scheduled parallel launch:", mpi) print(mpi._launcher(settings), "\n") _mpi = Mpi(scheduler=Torque(nodes='4:ppn=2')) print("scheduled parallel launch:", _mpi) print(_mpi._launcher(settings), "\n") _mpi = TorqueMpi(nodes='4:ppn=2') print("scheduled parallel launch:", _mpi) print(_mpi._launcher(settings), "\n") qsub.nodes = 1 serial = SerialMapper() print("scheduled serial launch:", serial, "| Torque") print(qsub._submit(serial._launcher(settings)), "\n") if __name__ == '__main__': test_launches() test_launcher() # EOF uqfoundation-pyina-c629452/examples/test_pmap.py000077500000000000000000000022521467660040300220220ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns 
@caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE doc = """ # Tests parallel, master-worker. Version 0 # To run: (use #nodes >= 2) alias mpython='mpiexec -np [#nodes] `which python`' mpython test_pmap.py """ # pick either mapping strategy from pyina.mpi_scatter import parallel_map #from pyina.mpi_pool import parallel_map if __name__ == "__main__": from pyina import mpi, ensure_mpi world = mpi.world ensure_mpi(size=2, doc=doc) def func(input): import time from pyina import mpi world = mpi.world time.sleep(0.0001) return "-%d" % world.rank inputlist = [] if world.rank == 0: inputlist = [0] * 300 for i in range(20): if world.rank == 0: print("iteration %d" % i) out = parallel_map(func, inputlist, comm = world) if world.rank == 0: print(''.join(out)) # End of file uqfoundation-pyina-c629452/examples/which.py000077500000000000000000000025561467660040300211370ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE __doc__ = """ # check which python mpirun is executing # To run (in parallel): mpiexec -np 1 python which.py """ import sys print((sys.version)) print((sys.executable)) try: import mpi4py print(("mpi4py %s is installed" % getattr(mpi4py, '__version__', ''))) except ImportError: print("mpi4py not installed") exit() except: print("mpi4py install broken") exit() try: import pathos print(("pathos %s is installed" % getattr(pathos, '__version__', ''))) except ImportError: print("pathos not installed") exit() except: print("pathos install broken") exit() try: import numpy import dill import pox print("all dependencies are installed") except ImportError: print("all dependencies not installed") exit() except: print("dependency install broken") exit() try: import pyina print(("pyina %s is installed" % getattr(pyina, '__version__', ''))) except ImportError: print("pyina not installed") except: print("pyina install broken") # End of file uqfoundation-pyina-c629452/pyina/000077500000000000000000000000001467660040300167525ustar00rootroot00000000000000uqfoundation-pyina-c629452/pyina/README000066400000000000000000000027561467660040300176440ustar00rootroot00000000000000This package contains the beginnings of a parallel-mapping implementation for a mpi-based launcher. This package was adapted from the pyina branch of mystic. 
It requires: - mpi4py (see http://code.google.com/p/mpi4py) - dill (part of pathos; see http://dev.danse.us/trac/pathos) ez_map provides a parallel map that hides mpi within a "map" and "launcher" - ez_map: writes mapped_function to temporary sourcefile (.py) - ez_map2: writes mapped_function to temporary picklefile (.pik) parallel_map provides a parallel map that exposes mpi to the user - mpi_pool: sends jobs one-by-one to available nodes - mpi_scatter: divides jobs evenly; sends once to each node ############################################################################## WARNING: To run pyina code, you _must_ have your console configured for MPI. * Start mpd $ mpd & * Configure master and slaves $ mpi_world.py -slaves [node1,node2,node3] NOTE: It is convienent to set a shell alias for the launch of the mpi-python jobs. Something like the following (for bash): $ alias mpython1='mpiexec -np 1 `which python`' $ alias mpython2='mpiexec -np 2 `which python`' $ ... NOTE: There's also a convienent script to tear down your mpi environment. * Kill all $ mpi_world -kill NOTE: If jobs exit uncleanly, you may need to run some cleanup scripts. * Clean up $ /opt/mpich/gnu/sbin/cleanipcs $ cluster-fork /opt/mpich/gnu/sbin/cleanipcs $ cluster-fork killall python # end of file uqfoundation-pyina-c629452/pyina/__init__.py000066400000000000000000000032001467660040300210560ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE # author, version, license, and long description try: # the package is installed from .__info__ import __version__, __author__, __doc__, __license__ except: # pragma: no cover import os import sys parent = os.path.dirname(os.path.abspath(os.path.dirname(__file__))) sys.path.append(parent) # get distribution meta info from version import (__version__, __author__, get_license_text, get_readme_as_rst) __license__ = get_license_text(os.path.join(parent, 'LICENSE')) __license__ = "\n%s" % __license__ __doc__ = get_readme_as_rst(os.path.join(parent, 'README.md')) del os, sys, parent, get_license_text, get_readme_as_rst # launchers import pyina.launchers as launchers import pyina.schedulers as schedulers # mappers import pyina.mpi as mpi # strategies import pyina.mpi_scatter as mpi_scatter import pyina.mpi_pool as mpi_pool # tools from .tools import * # backward compatibility parallel_map = mpi_pool parallel_map.parallel_map = mpi_pool.parallel_map parallel_map2 = mpi_scatter parallel_map2.parallel_map = mpi_scatter.parallel_map #import ez_map #import mappers def license(): """print license""" print(__license__) return def citation(): """print citation""" print (__doc__[-491:-118]) return # end of file uqfoundation-pyina-c629452/pyina/__main__.py000077500000000000000000000100311467660040300210420ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE # # helper script to setup your mpi environment __doc__ = """ setup/query/kill the MPI environment Notes: Commandline options are: * ``--help`` [prints this message] * ``--workers nodes`` [set mpi world (nodes is a list of worker nodes)] * ``--fetch N`` [get rank and names of 'N' worker nodes] * ``--kill`` [tear down mpi world] ``'mpd &'`` must be run before setting the worker nodes. Examples:: $ mpi_world --workers n00 n01 n02 n03 seting up mpi... $ mpi_world --fetch 4 Rank: 0 -- n00.borel.local Rank: 1 -- n01.borel.local Rank: 3 -- n03.borel.local Rank: 2 -- n02.borel.local """ # --alias nnodes set bash aliases for mpiexec (nnodes is X in '-np X') from subprocess import Popen, PIPE, STDOUT popen4 = {'shell':True, 'stdin':PIPE, 'stdout':PIPE, 'stderr':STDOUT, \ 'close_fds':True} MASTERINFO = [] def launch(command,quiet=True): "launch a os.system command; if quiet, don't grab the output" print("launch: %s" % command) p = Popen(command, **popen4) p.stdin.close() if quiet is True: outstr = None else: outstr = p.stdout.readlines() p.stdout.close() #print "result: %s" % outstr return outstr def alias(nnodes): "set a bash shell alias to configure mpiexec to run python on nnodes" node = str(nnodes) alias = "mpython%s='mpiexec -np %s `which python`'" % (node,node) command = "alias %s" % alias print(command) raise NotImplementedError #FIXME: alias doesn't stick to user's console try: launch(command) except OSError: pass return def set_master(): "get master info" # launch('mpd &') #FIXME: doesn't work! try: outstr = launch('mpdtrace -l',quiet=False) master,ip = outstr[0].split() master,port = master.split("_") MASTERINFO = [master,int(port)] except: err = "did you run 'mpd &' first?" 
raise (Exception, err) return MASTERINFO def set_workers(nodelist,masterinfo=MASTERINFO): "run mpd on worker nodes" host = str(masterinfo[0]) port = str(masterinfo[1]) for node in nodelist: command = "rsh %s mpd -h %s -p %s &" % (node,host,port) launch(command) return def kill_all(): "kill the mpi world" launch("mpdallexit") #outstr = launch("ps | grep 'rsh'",quiet=False) #for line in outstr: # print line return def host(id): import socket return "Rank: %d -- %s" % (id, socket.gethostname()) if __name__=="__main__": import sys from pyina.launchers import MpiPool if sys.argv[-1] == "--kill": print("killing all...") kill_all() elif len(sys.argv) > 2: if sys.argv[1] == "--workers": print("seting up mpi...") MASTERINFO = set_master() nodes = sys.argv[2:] nodes = [node.strip('[()]').strip(',').strip() for node in nodes] #nodes = nodes.strip('[()]').split(',') set_workers(nodes,MASTERINFO) #elif sys.argv[1] == "--alias": # print "setting up mpi python..." # nodes = sys.argv[2:] # nodes = [node.strip('[()]').strip(',').strip() for node in nodes] # for node in nodes: # alias(int(node)) elif sys.argv[1] == "--fetch": nnodes = int(sys.argv[2]) try: pool = MpiPool() pool.nodes = nnodes hostnames = pool.map(host, range(nnodes)) print('\n'.join(hostnames)) except: # "--help" print(__doc__) else: # "--help" print(__doc__) else: # "--help" print(__doc__) # End of file uqfoundation-pyina-c629452/pyina/ez_map.py000077500000000000000000000333421467660040300206070ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE """ The ez_map function is a helper to parallel_map to further simplify parallel programming. 
Primarily ez_map provides a standard interface for parallel_map, and facilitates running parallel jobs with serial python. Usage ===== A call to ez_map will roughly follow this example: >>> # get the parallel mapper >>> from pyina.ez_map import ez_map >>> # construct a target function >>> def host(id): ... import socket ... return "Rank: %d -- %s" % (id, socket.gethostname()) ... >>> # launch the parallel map of the target function >>> results = ez_map(host, range(100), nodes = 10) >>> for result in results: ... print(result) Implementation ============== A parallel application is launched by using a helper script (e.g. `ezrun.py`) as an intermediary between the MPI implementation of the parallel map (e.g. `pyina.mpi_pool.parallel_map`) and the user's serial python. The system call that submits the mpi job is blocking. Reasons are:: 1) If the main program exits before the parallel job starts, any temp files used by ez_map will be lost. 2) User is supposed to want to use the return value of the map, so blocking at the result of map shouldn't be a big hinderance. 3) If we were to allow the call to be asynchronous, we would need to implement some kind of 'deferred' mechanism or job monitoring. Argument movement for the argument list and the returned results are pickled, while the mapped function is either saved to and imported from a temporary file (e.g. `pyina.ez_map.ez_map`), or transferred through serialization (e.g. `pyina.ez_map.ez_map2`). Either implementation has it's own advantages and weaknesses, and one mapper may succeed in a case where the other may fail. 
""" defaults = { 'progname' : 'ezscatter', } from pyina.mpi import defaults as ezdefaults ezdefaults.update(defaults) from .launchers import launch, mpirun_tasks, srun_tasks, aprun_tasks from .launchers import serial_launcher, mpirun_launcher, srun_launcher from .launchers import aprun_launcher, torque_launcher, moab_launcher from .schedulers import torque_scheduler, moab_scheduler HOLD = [] sleeptime = 30 #XXX: the time between checking for results #def ez_map(func, arglist, nodes=None, launcher=None, mapper=None): def ez_map(func, *arglist, **kwds): """higher-level map interface for selected mapper and launcher maps function 'func' across arguments 'arglist'. arguments and results are stored and sent as pickled strings, while function 'func' is inspected and written as a source file to be imported. Further Input: nodes -- the number of parallel nodes launcher -- the launcher object scheduler -- the scheduler object mapper -- the mapper object timelimit -- string representation of maximum run time (e.g. '00:02') queue -- string name of selected queue (e.g. 'normal') """ import dill as pickle import os.path, tempfile, subprocess from pyina.tools import which_strategy # mapper = None (allow for use of default mapper) if 'mapper' in kwds: mapper = kwds['mapper'] if mapper() == "mpi_pool": scatter = False elif mapper() == "mpi_scatter": scatter = True else: raise NotImplementedError("Mapper '%s' not found." % mapper()) ezdefaults['program'] = which_strategy(scatter, lazy=True) # override the defaults if 'nnodes' in kwds: ezdefaults['nodes'] = kwds['nnodes'] if 'nodes' in kwds: ezdefaults['nodes'] = kwds['nodes'] if 'timelimit' in kwds: ezdefaults['timelimit'] = kwds['timelimit'] if 'queue' in kwds: ezdefaults['queue'] = kwds['queue'] # set the scheduler & launcher (or use the given default) if 'launcher' in kwds: launcher = kwds['launcher'] else: launcher = mpirun_launcher #XXX: default = non_mpi? 
if 'scheduler' in kwds: scheduler = kwds['scheduler'] else: scheduler = '' # set scratch directory (most often required for queue launcher) if 'workdir' in kwds: ezdefaults['workdir'] = kwds['workdir'] else: if launcher in [torque_launcher, moab_launcher] \ or scheduler in [torque_scheduler, moab_scheduler]: ezdefaults['workdir'] = os.path.expanduser("~") from dill.temp import dump, dump_source # write func source to a NamedTemporaryFile (instead of pickle.dump) # ezrun requires 'FUNC = ' to be included as module.FUNC modfile = dump_source(func, alias='FUNC', dir=ezdefaults['workdir']) # standard pickle.dump of inputs to a NamedTemporaryFile kwd = {'onall':kwds.get('onall',True)} argfile = dump((arglist,kwd), suffix='.arg', dir=ezdefaults['workdir']) # Keep the above return values for as long as you want the tempfile to exist resfilename = tempfile.mktemp(dir=ezdefaults['workdir']) modname = os.path.splitext(os.path.basename(modfile.name))[0] ezdefaults['progargs'] = ' '.join([modname, argfile.name, resfilename, \ ezdefaults['workdir']]) #HOLD.append(modfile) #HOLD.append(argfile) if launcher in [torque_launcher, moab_launcher] \ or scheduler in [torque_scheduler, moab_scheduler]: jobfilename = tempfile.mktemp(dir=ezdefaults['workdir']) outfilename = tempfile.mktemp(dir=ezdefaults['workdir']) errfilename = tempfile.mktemp(dir=ezdefaults['workdir']) ezdefaults['jobfile'] = jobfilename ezdefaults['outfile'] = outfilename ezdefaults['errfile'] = errfilename # get the appropriate launcher for the scheduler if scheduler in [torque_scheduler] and launcher in [mpirun_launcher]: launcher = torque_launcher ezdefaults['scheduler'] = scheduler().mpirun elif scheduler in [moab_scheduler] and launcher in [mpirun_launcher]: launcher = moab_launcher ezdefaults['scheduler'] = scheduler().mpirun elif scheduler in [torque_scheduler] and launcher in [srun_launcher]: launcher = torque_launcher ezdefaults['scheduler'] = scheduler().srun elif scheduler in [moab_scheduler] and launcher 
in [srun_launcher]: launcher = moab_launcher ezdefaults['scheduler'] = scheduler().srun elif scheduler in [torque_scheduler] and launcher in [aprun_launcher]: launcher = torque_launcher ezdefaults['scheduler'] = scheduler().aprun elif scheduler in [moab_scheduler] and launcher in [aprun_launcher]: launcher = moab_launcher ezdefaults['scheduler'] = scheduler().aprun elif scheduler in [torque_scheduler] and launcher in [serial_launcher]: launcher = torque_launcher ezdefaults['scheduler'] = scheduler().serial elif scheduler in [moab_scheduler] and launcher in [serial_launcher]: launcher = moab_launcher ezdefaults['scheduler'] = scheduler().serial #else: scheduler = None # counting on the function below to block until done. #print 'executing: ', launcher(ezdefaults) launch(launcher(ezdefaults)) #FIXME: use subprocessing if launcher in [torque_launcher, moab_launcher] \ or scheduler in [torque_scheduler, moab_scheduler]: import time #BLOCKING while (not os.path.exists(resfilename)): #XXX: or out* to confirm start time.sleep(sleeptime) #XXX: wait for results... may infinite loop? subprocess.call('rm -f %s' % jobfilename, shell=True) subprocess.call('rm -f %s' % outfilename, shell=True) subprocess.call('rm -f %s' % errfilename, shell=True) # debuggery... 
output = function(inputs) #subprocess.call('cp -f %s modfile.py' % modfile.name, shell=True) # getsource; FUNC=func #subprocess.call('cp -f %s argfile.py' % argfile.name, shell=True) # pickled list of inputs #subprocess.call('cp -f %s resfile.py' % resfilename, shell=True) # pickled list of output # read result back res = pickle.load(open(resfilename,'rb')) subprocess.call('rm -f %s' % resfilename, shell=True) subprocess.call('rm -f %sc' % modfile.name, shell=True) modfile.close(); argfile.close() # pypy removes closed tempfiles return res #def ez_map2(func, arglist, nodes=None, launcher=None, mapper=None): def ez_map2(func, *arglist, **kwds): """higher-level map interface for selected mapper and launcher maps function 'func' across arguments 'arglist'. arguments and results are stored and sent as pickled strings, the function 'func' is also stored and sent as pickled strings. This is different than 'ez_map', in that it does not use temporary files to store the mapped function. Further Input: nodes -- the number of parallel nodes launcher -- the launcher object scheduler -- the scheduler object mapper -- the mapper object timelimit -- string representation of maximum run time (e.g. '00:02') queue -- string name of selected queue (e.g. 'normal') """ import dill as pickle import os.path, tempfile, subprocess from pyina.tools import which_strategy # mapper = None (allow for use of default mapper) if 'mapper' in kwds: mapper = kwds['mapper'] if mapper() == "mpi_pool": scatter = False elif mapper() == "mpi_scatter": scatter = True else: raise NotImplementedError("Mapper '%s' not found." 
% mapper())
    # select the strategy script (ezpool/ezscatter) that the launcher will run
    ezdefaults['program'] = which_strategy(scatter, lazy=True)
    # override the defaults
    if 'nnodes' in kwds: ezdefaults['nodes'] = kwds['nnodes']
    if 'nodes' in kwds: ezdefaults['nodes'] = kwds['nodes']
    if 'timelimit' in kwds: ezdefaults['timelimit'] = kwds['timelimit']
    if 'queue' in kwds: ezdefaults['queue'] = kwds['queue']
    # set the scheduler & launcher (or use the given default)
    if 'launcher' in kwds: launcher = kwds['launcher']
    else: launcher = mpirun_launcher #XXX: default = non_mpi?
    if 'scheduler' in kwds: scheduler = kwds['scheduler']
    else: scheduler = ''
    # set scratch directory (most often required for queue launcher)
    # NOTE(review): queue-based runs default the workdir to $HOME so the
    # compute nodes can see the tempfiles -- confirm shared-filesystem layout
    if 'workdir' in kwds: ezdefaults['workdir'] = kwds['workdir']
    else:
        if launcher in [torque_launcher, moab_launcher] \
        or scheduler in [torque_scheduler, moab_scheduler]:
            ezdefaults['workdir'] = os.path.expanduser("~")
    from dill.temp import dump
    # standard pickle.dump of inputs to a NamedTemporaryFile
    # (the pickled function and the pickled (args, kwds) travel as files)
    modfile = dump(func, suffix='.pik', dir=ezdefaults['workdir'])
    kwd = {'onall':kwds.get('onall',True)}
    argfile = dump((arglist,kwd), suffix='.arg', dir=ezdefaults['workdir'])
    # Keep the above return values for as long as you want the tempfile to exist
    # NOTE(review): tempfile.mktemp is deprecated/race-prone, but the polling
    # loop below relies on this path NOT existing until the workers write it,
    # so mkstemp (which creates the file) would break the wait condition
    resfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
    ezdefaults['progargs'] = ' '.join([modfile.name,argfile.name,resfilename, \
                                       ezdefaults['workdir']])
    #HOLD.append(modfile)
    #HOLD.append(argfile)
    # queue-based launches also need job/stdout/stderr files for the scheduler
    if launcher in [torque_launcher, moab_launcher] \
    or scheduler in [torque_scheduler, moab_scheduler]:
        jobfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        outfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        errfilename = tempfile.mktemp(dir=ezdefaults['workdir'])
        ezdefaults['jobfile'] = jobfilename
        ezdefaults['outfile'] = outfilename
        ezdefaults['errfile'] = errfilename
    # get the appropriate launcher for the scheduler
    # (each scheduler object exposes the submit string for a given launcher)
    if scheduler in [torque_scheduler] and launcher in [mpirun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().mpirun
    elif scheduler in [moab_scheduler] and launcher in [mpirun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().mpirun
    elif scheduler in [torque_scheduler] and launcher in [srun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().srun
    elif scheduler in [moab_scheduler] and launcher in [srun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().srun
    elif scheduler in [torque_scheduler] and launcher in [aprun_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().aprun
    elif scheduler in [moab_scheduler] and launcher in [aprun_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().aprun
    elif scheduler in [torque_scheduler] and launcher in [serial_launcher]:
        launcher = torque_launcher
        ezdefaults['scheduler'] = scheduler().serial
    elif scheduler in [moab_scheduler] and launcher in [serial_launcher]:
        launcher = moab_launcher
        ezdefaults['scheduler'] = scheduler().serial
    #else: scheduler = None
    # counting on the function below to block until done.
    #print 'executing: ', launcher(ezdefaults)
    launch(launcher(ezdefaults)) #FIXME: use subprocessing
    # queue submission returns immediately, so poll for the results file
    if launcher in [torque_launcher, moab_launcher] \
    or scheduler in [torque_scheduler, moab_scheduler]:
        import time
        #BLOCKING
        while (not os.path.exists(resfilename)): #XXX: or out* to confirm start
            time.sleep(sleeptime) #XXX: wait for results... may infinite loop?
subprocess.call('rm -f %s' % jobfilename, shell=True) subprocess.call('rm -f %s' % outfilename, shell=True) subprocess.call('rm -f %s' % errfilename, shell=True) # read result back res = pickle.load(open(resfilename,'rb')) subprocess.call('rm -f %s' % resfilename, shell=True) modfile.close(); argfile.close() # pypy removes closed tempfiles return res if __name__ == '__main__': print("simple tests are in examples/test_ezmap*.py") # end of file uqfoundation-pyina-c629452/pyina/launchers.py000077500000000000000000000610711467660040300213200ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE """ This module contains prepared launchers for parallel execution, including bindings to some common combinations of launchers and schedulers. 
Base classes:
    SerialMapper - base class for pipe-based mapping with python
    ParallelMapper - base class for pipe-based mapping with mpi4py

Parallel launchers:
    Mpi -
    Slurm -
    Alps -

Pre-built combinations of the above launchers and schedulers:
    TorqueMpi, TorqueSlurm, MoabMpi, MoabSlurm

Pre-configured maps using the 'scatter-gather' strategy:
    MpiScatter, SlurmScatter, AlpsScatter, TorqueMpiScatter,
    TorqueSlurmScatter, MoabMpiScatter, MoabSlurmScatter

Pre-configured maps using the 'worker pool' strategy:
    MpiPool, SlurmPool, AlpsPool, TorqueMpiPool, TorqueSlurmPool,
    MoabMpiPool, MoabSlurmPool


Usage
=====

A typical call to a pyina mpi map will roughly follow this example:

    >>> # instantiate and configure a scheduler
    >>> from pyina.schedulers import Torque
    >>> config = {'nodes':'32:ppn=4', 'queue':'dedicated', 'timelimit':'11:59'}
    >>> torque = Torque(**config)
    >>>
    >>> # instantiate and configure a worker pool
    >>> from pyina.launchers import Mpi
    >>> pool = Mpi(scheduler=torque)
    >>>
    >>> # do a blocking map on the chosen function
    >>> results = pool.map(pow, [1,2,3,4], [5,6,7,8])

Several common configurations are available as pre-configured maps.
The following is identical to the above example:

    >>> # instantiate and configure a pre-configured worker pool
    >>> from pyina.launchers import TorqueMpiPool
    >>> config = {'nodes':'32:ppn=4', 'queue':'dedicated', 'timelimit':'11:59'}
    >>> pool = TorqueMpiPool(**config)
    >>>
    >>> # do a blocking map on the chosen function
    >>> results = pool.map(pow, [1,2,3,4], [5,6,7,8])


Notes
=====

This set of parallel maps leverages the mpi4py module, and thus has many of
the limitations associated with that module. The function f and the sequences
in args must be serializable. The maps provided here <<< FIXME >>>
functionality when run from a script, however are somewhat limited when used
in the python interpreter.
Both imported and interactively-defined functions in the interpreter session may fail due to the pool failing to find the source code for the target function. For a work-around, try: <<< END FIXME >>> """ __all__ = ['SerialMapper', 'ParallelMapper', 'Mpi', 'Slurm', 'Alps', 'MpiPool', 'MpiScatter', 'SlurmPool', 'SlurmScatter', 'AlpsPool', 'AlpsScatter', 'TorqueMpi', 'TorqueSlurm', 'MoabMpi', 'MoabSlurm', 'TorqueMpiPool', 'TorqueMpiScatter', 'TorqueSlurmPool', 'TorqueSlurmScatter', 'MoabMpiPool', 'MoabMpiScatter', 'MoabSlurmPool', 'MoabSlurmScatter'] from pyina.mpi import Mapper, defaults from pathos.abstract_launcher import AbstractWorkerPool from pathos.helpers import cpu_count from pyina.schedulers import Torque, Moab, Lsf import logging log = logging.getLogger("launchers") log.addHandler(logging.StreamHandler()) class SerialMapper(Mapper): """ Mapper base class for pipe-based mapping with python. """ def __init__(self, *args, **kwds): Mapper.__init__(self, *args, **kwds) self.nodes = 1 # always has one node... it's serial! return __init__.__doc__ = Mapper.__init__.__doc__ def _launcher(self, kdict={}): """prepare launch command for pipe-based execution equivalent to: (python) (program) (progargs) NOTES: run non-python commands with: {'python':'', ...} """ mydict = self.settings.copy() mydict.update(kdict) str = """%(python)s %(program)s %(progargs)s""" % mydict if self.scheduler: str = self.scheduler._submit(str) return str def map(self, func, *args, **kwds): return Mapper.map(self, func, *args, **kwds) map.__doc__ = ((Mapper.map.__doc__ or '')+(_launcher.__doc__ or '')) or None def __repr__(self): if self.scheduler: scheduler = self.scheduler.__class__.__name__ else: scheduler = "None" mapargs = (self.__class__.__name__, scheduler) return "" % mapargs pass #FIXME: enable user to override 'mpirun' class ParallelMapper(Mapper): #FIXME FIXME: stopped docs here """ Mapper base class for pipe-based mapping with mpi4py. 
""" __nodes = None def __init__(self, *args, **kwds): """\nNOTE: if number of nodes is not given, will try to grab the number of nodes from the associated scheduler, and failing will count the local cpus. If workdir is not given, will default to scheduler's workdir or $WORKDIR. If scheduler is not given, will default to only run on the current node. If pickle is not given, will attempt to minimially use TemporaryFiles. For more details, see the docstrings for the "map" method, or the man page for the associated launcher (e.g mpirun, mpiexec). """ Mapper.__init__(self, *args, **kwds) self.scatter = bool(kwds.get('scatter', False)) #XXX: hang w/ nodes=1 ? #self.nodes = kwds.get('nodes', None) if not len(args) and 'nodes' not in kwds: if self.scheduler: self.nodes = self.scheduler.nodes else: self.nodes = cpu_count() return if AbstractWorkerPool.__init__.__doc__: __init__.__doc__ = AbstractWorkerPool.__init__.__doc__ + __init__.__doc__ def njobs(self, nodes): """convert node_string intended for scheduler to int number of nodes compute int from node string. 
For example, parallel.njobs("4") yields 4 """ return int(str(nodes)) #XXX: this is a dummy function def _launcher(self, kdict={}): """prepare launch command for pipe-based execution equivalent to: (python) (program) (progargs) NOTES: run non-python commands with: {'python':'', ...} """ mydict = self.settings.copy() mydict.update(kdict) str = """%(python)s %(program)s %(progargs)s""" % mydict if self.scheduler: str = self.scheduler._submit(str) return str def map(self, func, *args, **kwds): return Mapper.map(self, func, *args, **kwds) map.__doc__ = ((Mapper.map.__doc__ or '')+(_launcher.__doc__ or '')) or None def __repr__(self): if self.scheduler: scheduler = self.scheduler.__class__.__name__ else: scheduler = "None" mapargs = (self.__class__.__name__, self.nodes, scheduler) return "" % mapargs def __get_nodes(self): """get the number of nodes in the pool""" return self.__nodes def __set_nodes(self, nodes): """set the number of nodes in the pool""" self.__nodes = self.njobs(nodes) return # interface nodes = property(__get_nodes, __set_nodes) pass class Mpi(ParallelMapper): """ """ def njobs(self, nodes): """convert node_string intended for scheduler to mpirun node_string compute mpirun task_string from node string of pattern = N[:TYPE][:ppn=P] For example, mpirun.njobs("3:core4:ppn=2") yields 6 """ nodestr = str(nodes) nodestr = nodestr.split(",")[0] # remove appended -l expressions nodelst = nodestr.split(":") n = int(nodelst[0]) nodelst = nodestr.split("ppn=") if len(nodelst) > 1: ppn = nodelst[1] ppn = int(ppn.split(":")[0]) else: ppn = 1 tasks = n*ppn return tasks def _launcher(self, kdict={}): """prepare launch command for parallel execution using mpirun equivalent to: mpiexec -np (nodes) (python) (program) (progargs) NOTES: run non-python commands with: {'python':'', ...} """ mydict = self.settings.copy() mydict.update(kdict) #if self.scheduler: # mydict['nodes'] = self.njobs() str = """%(mpirun)s -np %(nodes)s %(python)s %(program)s %(progargs)s""" % mydict 
if self.scheduler: str = self.scheduler._submit(str) return str def map(self, func, *args, **kwds): return ParallelMapper.map(self, func, *args, **kwds) map.__doc__ = ((ParallelMapper.map.__doc__ or '')+(_launcher.__doc__ or '')) or None pass class Slurm(ParallelMapper): """ """ def njobs(self, nodes): """convert node_string intended for scheduler to srun node_string compute srun task_string from node string of pattern = N[:ppn=P][,partition=X] For example, srun.njobs("3:ppn=2,partition=foo") yields '3 -N2' """ nodestr = str(nodes) nodestr = nodestr.split(",")[0] # remove appended -l expressions nodelst = nodestr.split(":") n = int(nodelst[0]) nodelst = nodestr.split("ppn=") if len(nodelst) > 1: ppn = nodelst[1] ppn = int(ppn.split(":")[0]) tasks = "%s -N%s" % (n, ppn) else: tasks = "%s" % n return tasks def _launcher(self, kdict={}): """prepare launch for parallel execution using srun equivalent to: srun -n(nodes) (python) (program) (progargs) NOTES: run non-python commands with: {'python':'', ...} fine-grained resource utilization with: {'nodes':'4 -N1', ...} """ mydict = self.settings.copy() mydict.update(kdict) #if self.scheduler: # mydict['nodes'] = self.njobs() str = """srun -n%(nodes)s %(python)s %(program)s %(progargs)s""" % mydict if self.scheduler: str = self.scheduler._submit(str) return str def map(self, func, *args, **kwds): return ParallelMapper.map(self, func, *args, **kwds) map.__doc__ = ((ParallelMapper.map.__doc__ or '')+(_launcher.__doc__ or '')) or None pass class Alps(ParallelMapper): """ """ def njobs(self, nodes): """convert node_string intended for scheduler to aprun node_string compute aprun task_string from node string of pattern = N[:TYPE][:ppn=P] For example, aprun.njobs("3:core4:ppn=2") yields '3 -N 2' """ nodestr = str(nodes) nodestr = nodestr.split(",")[0] # remove appended -l expressions nodelst = nodestr.split(":") n = int(nodelst[0]) nodelst = nodestr.split("ppn=") if len(nodelst) > 1: ppn = nodelst[1] ppn = int(ppn.split(":")[0]) 
tasks = "%s -N %s" % (n, ppn) else: tasks = "%s" % n return tasks def _launcher(self, kdict={}): """prepare launch for parallel execution using aprun equivalent to: aprun -n (nodes) (python) (program) (progargs) NOTES: run non-python commands with: {'python':'', ...} fine-grained resource utilization with: {'nodes':'4 -N 1', ...} """ mydict = self.settings.copy() mydict.update(kdict) #if self.scheduler: # mydict['nodes'] = self.njobs() str = """aprun -n %(nodes)s %(python)s %(program)s %(progargs)s""" % mydict if self.scheduler: str = self.scheduler._submit(str) return str def map(self, func, *args, **kwds): return ParallelMapper.map(self, func, *args, **kwds) map.__doc__ = ((ParallelMapper.map.__doc__ or '')+(_launcher.__doc__ or '')) or None pass ##### 'pre-configured' maps ##### # launcher + strategy class MpiPool(Mpi): def __init__(self, *args, **kwds): kwds['scatter'] = False Mpi.__init__(self, *args, **kwds) pass class MpiScatter(Mpi): def __init__(self, *args, **kwds): kwds['scatter'] = True Mpi.__init__(self, *args, **kwds) pass class SlurmPool(Slurm): def __init__(self, *args, **kwds): kwds['scatter'] = False Slurm.__init__(self, *args, **kwds) pass class SlurmScatter(Slurm): def __init__(self, *args, **kwds): kwds['scatter'] = True Slurm.__init__(self, *args, **kwds) pass class AlpsPool(Alps): def __init__(self, *args, **kwds): kwds['scatter'] = False Alps.__init__(self, *args, **kwds) pass class AlpsScatter(Alps): def __init__(self, *args, **kwds): kwds['scatter'] = True Alps.__init__(self, *args, **kwds) pass # scheduler + launcher class TorqueMpi(Mpi): def __init__(self, *args, **kwds): kwds['scheduler'] = Torque(*args, **kwds) kwds.pop('nodes', None) Mpi.__init__(self, **kwds) pass class TorqueSlurm(Slurm): def __init__(self, *args, **kwds): kwds['scheduler'] = Torque(*args, **kwds) kwds.pop('nodes', None) Slurm.__init__(self, **kwds) pass class MoabMpi(Mpi): def __init__(self, *args, **kwds): kwds['scheduler'] = Moab(*args, **kwds) kwds.pop('nodes', 
None) Mpi.__init__(self, **kwds) pass class MoabSlurm(Slurm): def __init__(self, *args, **kwds): kwds['scheduler'] = Moab(*args, **kwds) kwds.pop('nodes', None) Slurm.__init__(self, **kwds) pass # scheduler + launcher + strategy class TorqueMpiPool(TorqueMpi): def __init__(self, *args, **kwds): kwds['scatter'] = False TorqueMpi.__init__(self, *args, **kwds) pass class TorqueMpiScatter(TorqueMpi): def __init__(self, *args, **kwds): kwds['scatter'] = True TorqueMpi.__init__(self, *args, **kwds) pass class TorqueSlurmPool(TorqueSlurm): def __init__(self, *args, **kwds): kwds['scatter'] = False TorqueSlurm.__init__(self, *args, **kwds) pass class TorqueSlurmScatter(TorqueSlurm): def __init__(self, *args, **kwds): kwds['scatter'] = True TorqueSlurm.__init__(self, *args, **kwds) pass class MoabMpiPool(MoabMpi): def __init__(self, *args, **kwds): kwds['scatter'] = False MoabMpi.__init__(self, *args, **kwds) pass class MoabMpiScatter(MoabMpi): def __init__(self, *args, **kwds): kwds['scatter'] = True MoabMpi.__init__(self, *args, **kwds) pass class MoabSlurmPool(MoabSlurm): def __init__(self, *args, **kwds): kwds['scatter'] = False MoabSlurm.__init__(self, *args, **kwds) pass class MoabSlurmScatter(MoabSlurm): def __init__(self, *args, **kwds): kwds['scatter'] = True MoabSlurm.__init__(self, *args, **kwds) pass # backward compatibility def launch(command): """ launch mechanism for prepared launch command""" mapper = Mapper() subproc = mapper._Mapper__launch(command) #pid = subproc.pid error = subproc.wait() # block until all done if error: raise IOError("launch failed: %s" % command) return error def mpirun_tasks(nodes): """ Helper function. compute mpirun task_string from node string of pattern = N[:TYPE][:ppn=P] For example, mpirun_tasks("3:core4:ppn=2") yields 6 """ mapper = Mpi() return mapper.njobs(nodes) def srun_tasks(nodes): """ Helper function. 
compute srun task_string from node string of pattern = N[:ppn=P][,partition=X]
For example, srun_tasks("3:ppn=2,partition=foo") yields '3 -N2'
    """
    mapper = Slurm()
    return mapper.njobs(nodes)

def aprun_tasks(nodes):
    """ Helper function.
compute aprun task_string from node string of pattern = N[:TYPE][:ppn=P]
For example, aprun_tasks("3:core4:ppn=2") yields '3 -N 2'
    """
    # delegate to the Alps mapper's node-string parser
    mapper = Alps()
    return mapper.njobs(nodes)

def serial_launcher(kdict={}):
    """
prepare launch for standard execution
syntax:  (python) (program) (progargs)

NOTES: run non-python commands with: {'python':'', ...}
    """
    # NOTE: mutable default kdict is safe here -- it is only read, never mutated
    mapper = SerialMapper()
    return mapper._launcher(kdict)

def mpirun_launcher(kdict={}):
    """
prepare launch for parallel execution using mpirun
syntax:  mpiexec -np (nodes) (python) (program) (progargs)

NOTES: run non-python commands with: {'python':'', ...}
    """
    # build the mpiexec/mpirun command string from the (default) settings
    mapper = Mpi()
    return mapper._launcher(kdict)

def srun_launcher(kdict={}):
    """
prepare launch for parallel execution using srun
syntax:  srun -n(nodes) (python) (program) (progargs)

NOTES: run non-python commands with: {'python':'', ...}
NOTES: fine-grained resource utilization with: {'nodes':'4 -N1', ...}
    """
    # build the slurm srun command string from the (default) settings
    mapper = Slurm()
    return mapper._launcher(kdict)

def aprun_launcher(kdict={}):
    """
prepare launch for parallel execution using aprun
syntax:  aprun -n(nodes) (python) (program) (progargs)

NOTES: run non-python commands with: {'python':'', ...}
NOTES: fine-grained resource utilization with: {'nodes':'4 -N 1', ...}
    """
    # build the ALPS aprun command string from the (default) settings
    mapper = Alps()
    return mapper._launcher(kdict)

def torque_launcher(kdict={}): #FIXME: update
    """
prepare launch for torque submission using mpiexec, srun, aprun, or serial
syntax: echo \"mpiexec -np (nodes) (python) (program) (progargs)\" | qsub -l nodes=(nodes) -l walltime=(timelimit) -o (outfile) -e (errfile) -q (queue)
syntax: echo \"srun -n(nodes) (python) (program) (progargs)\" | qsub -l nodes=(nodes) -l walltime=(timelimit) -o (outfile) -e (errfile) -q (queue)
syntax: echo \"aprun -n (nodes) (python) (program) (progargs)\" | qsub
-l nodes=(nodes) -l walltime=(timelimit) -o (outfile) -e (errfile) -q (queue) syntax: echo \"(python) (program) (progargs)\" | qsub -l nodes=(nodes) -l walltime=(timelimit) -o (outfile) -e (errfile) -q (queue) NOTES: run non-python commands with: {'python':'', ...} fine-grained resource utilization with: {'nodes':'4:nodetype:ppn=1', ...} """ mydict = defaults.copy() mydict.update(kdict) from .schedulers import torque_scheduler torque = torque_scheduler() #FIXME: hackery if mydict['scheduler'] == torque.srun: mydict['tasks'] = srun_tasks(mydict['nodes']) str = """ echo \"srun -n%(tasks)s %(python)s %(program)s %(progargs)s\" | qsub -l nodes=%(nodes)s -l walltime=%(timelimit)s -o %(outfile)s -e %(errfile)s -q %(queue)s &> %(jobfile)s""" % mydict elif mydict['scheduler'] == torque.mpirun: mydict['tasks'] = mpirun_tasks(mydict['nodes']) str = """ echo \"%(mpirun)s -np %(tasks)s %(python)s %(program)s %(progargs)s\" | qsub -l nodes=%(nodes)s -l walltime=%(timelimit)s -o %(outfile)s -e %(errfile)s -q %(queue)s &> %(jobfile)s""" % mydict elif mydict['scheduler'] == torque.aprun: mydict['tasks'] = aprun_tasks(mydict['nodes']) str = """ echo \"aprun -n %(tasks)s %(python)s %(program)s %(progargs)s\" | qsub -l nodes=%(nodes)s -l walltime=%(timelimit)s -o %(outfile)s -e %(errfile)s -q %(queue)s &> %(jobfile)s""" % mydict else: # non-mpi launch str = """ echo \"%(python)s %(program)s %(progargs)s\" | qsub -l nodes=%(nodes)s -l walltime=%(timelimit)s -o %(outfile)s -e %(errfile)s -q %(queue)s &> %(jobfile)s""" % mydict return str def moab_launcher(kdict={}): #FIXME: update """ prepare launch for moab submission using srun, mpirun, aprun, or serial syntax: echo \"srun -n(nodes) (python) (program) (progargs)\" | msub -l nodes=(nodes) -l walltime=(timelimit) -o (outfile) -e (errfile) -q (queue) syntax: echo \"%(mpirun)s -np (nodes) (python) (program) (progargs)\" | msub -l nodes=(nodes) -l walltime=(timelimit) -o (outfile) -e (errfile) -q (queue) syntax: echo \"aprun -n (nodes) 
(python) (program) (progargs)\" | msub -l nodes=(nodes) -l walltime=(timelimit) -o (outfile) -e (errfile) -q (queue) syntax: echo \"(python) (program) (progargs)\" | msub -l nodes=(nodes) -l walltime=(timelimit) -o (outfile) -e (errfile) -q (queue) NOTES: run non-python commands with: {'python':'', ...} fine-grained resource utilization with: {'nodes':'4:ppn=1,partition=xx', ...} """ mydict = defaults.copy() mydict.update(kdict) from .schedulers import moab_scheduler moab = moab_scheduler() #FIXME: hackery if mydict['scheduler'] == moab.mpirun: mydict['tasks'] = mpirun_tasks(mydict['nodes']) str = """ echo \"%(mpirun)s -np %(tasks)s %(python)s %(program)s %(progargs)s\" | msub -l nodes=%(nodes)s -l walltime=%(timelimit)s -o %(outfile)s -e %(errfile)s -q %(queue)s &> %(jobfile)s""" % mydict elif mydict['scheduler'] == moab.srun: mydict['tasks'] = srun_tasks(mydict['nodes']) str = """ echo \"srun -n%(tasks)s %(python)s %(program)s %(progargs)s\" | msub -l nodes=%(nodes)s -l walltime=%(timelimit)s -o %(outfile)s -e %(errfile)s -q %(queue)s &> %(jobfile)s""" % mydict elif mydict['scheduler'] == moab.aprun: mydict['tasks'] = aprun_tasks(mydict['nodes']) str = """ echo \"aprun -n %(tasks)s %(python)s %(program)s %(progargs)s\" | msub -l nodes=%(nodes)s -l walltime=%(timelimit)s -o %(outfile)s -e %(errfile)s -q %(queue)s &> %(jobfile)s""" % mydict else: # non-mpi launch str = """ echo \"%(python)s %(program)s %(progargs)s\" | msub -l nodes=%(nodes)s -l walltime=%(timelimit)s -o %(outfile)s -e %(errfile)s -q %(queue)s &> %(jobfile)s""" % mydict return str def lsfmx_launcher(kdict={}): #FIXME: update """ prepare launch for Myrinet / LSF submission of parallel python using mpich_mx syntax: bsub -K -W(timelimit) -n (nodes) -o (outfile) -a mpich_mx -q (queue) -J (progname) mpich_mx_wrapper (python) (program) (progargs) NOTES: run non-python commands with: {'python':'', ...} """ mydict = defaults.copy() mydict.update(kdict) #str = """ bsub -K -W%(timelimit)s -n %(nodes)s -o 
./%%J.out -a mpich_mx -q %(queue)s -J %(progname)s mpich_mx_wrapper %(python)s %(program)s %(progargs)s""" % mydict str = """ bsub -K -W%(timelimit)s -n %(nodes)s -o %(outfile)s -a mpich_mx -q %(queue)s -J %(progname)s mpich_mx_wrapper %(python)s %(program)s %(progargs)s""" % mydict return str def lsfgm_launcher(kdict={}): #FIXME: update """ prepare launch for Myrinet / LSF submission of parallel python using mpich_gm syntax: bsub -K -W(timelimit) -n (nodes) -o (outfile) -a mpich_gm -q (queue) -J (progname) gmmpirun_wrapper (python) (program) (progargs) NOTES: run non-python commands with: {'python':'', ...} """ mydict = defaults.copy() mydict.update(kdict) #str = """ bsub -K -W%(timelimit)s -n %(nodes)s -o ./%%J.out -a mpich_gm -q %(queue)s -J %(progname)s gmmpirun_wrapper %(python)s %(program)s %(progargs)s""" % mydict str = """ bsub -K -W%(timelimit)s -n %(nodes)s -o %(outfile)s -a mpich_gm -q %(queue)s -J %(progname)s gmmpirun_wrapper %(python)s %(program)s %(progargs)s""" % mydict return str def all_launchers(): import pyina.launchers as launchers L = ["launchers.%s" % f for f in dir(launchers) if f[-8:] == "launcher"] return L def all_launches(kdict = {}): import pyina.launchers as launchers, traceback, os.path stack = traceback.extract_stack() caller = stack[ -min(len(stack),2) ][0] # defaults['program'] = caller defaults['progname'] = os.path.basename(caller) # for key in defaults.keys(): if key not in kdict: kdict[key] = defaults[key] L = all_launchers() # str = [] for launcher in L: str.append(eval('%s(kdict)' % (launcher))) str.append('') return '\n'.join(str) def __launch(): doc = """ # Returns a sample command for launching parallel jobs. # Helpful in making docstrings, by allowing the following in your docstring # "%%(launcher)s", and then doing string interpolation "{'launcher' : all_launches()}" # and you will get: %(launcher)s # all_launches does a stack traceback to find the name of the program containing its caller. 
# This allows interpolation of the __file__ variable into the mpi launch commands. # Most flexibly, all_launches should be called with a dictionary. Here are the defaults. # defaults = { 'timelimit' : '00:02', # 'program' : *name of the caller*, # 'progname' : *os.path.basename of the caller*, # 'outfile' : *path of the output file*, # 'errfile' : *path of the error file*, # 'jobfile' : *path of jobid file*, # 'queue' : 'normal', # 'python' : '`which python`', # 'nodes' : '1', # 'progargs' : '', # 'scheduler' : '', # } """ % {'launcher' : all_launches({'program':__file__, 'timelimit': '00:02', 'outfile':'./results.out'}) } #""" % defaults.update({'launcher' : all_launches(**defaults)}) return doc if __name__=='__main__': # from mystic import helputil # helputil.paginate(__launch()) print("python launch") defaults['program'] = "tools.py" launch(serial_launcher(defaults)) print("serial launch") settings = {'python':'', 'program':"hostname"} launch(serial_launcher(settings)) # EOF uqfoundation-pyina-c629452/pyina/mappers.py000077500000000000000000000025231467660040300210000ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE """ tiny function wrapper to make ez_map interface for mappers more standard provides: mapper_str = mapper() interface (for a the raw map function, use parallel_map directly) """ __all__ = ['worker_pool','scatter_gather'] def worker_pool(): """use the 'worker pool' strategy; hence one job is allocated to each worker, and the next new work item is provided when a node completes its work""" #from mpi_pool import parallel_map as map #return map return "mpi_pool" def scatter_gather(): """use the 'scatter-gather' strategy; hence split the workload as equally as possible across all available workers in a single pass""" #from mpi_scatter import parallel_map as map #return map return "mpi_scatter" # backward compatibility carddealer_mapper = worker_pool equalportion_mapper = scatter_gather def all_mappers(): import mappers L = ["mappers.%s" % f for f in dir(mappers) if f[-6:] == "mapper"] return L if __name__=='__main__': print(all_mappers()) # EOF uqfoundation-pyina-c629452/pyina/mpi.py000066400000000000000000000322311467660040300201120ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE """ This module contains the base of map and pipe interfaces to the mpi4py module. Pipe methods provided: ??? 
Map methods provided:
    map - blocking and ordered worker pool        [returns: list]

Base classes:
    Mapper - base class for pipe-based mapping


Usage
=====

A typical call to a pyina mpi map will roughly follow this example:

    >>> # instantiate and configure a scheduler
    >>> from pyina.schedulers import Torque
    >>> config = {'nodes':'32:ppn=4', 'queue':'dedicated', 'timelimit':'11:59'}
    >>> torque = Torque(**config)
    >>>
    >>> # instantiate and configure a worker pool
    >>> from pyina.launchers import Mpi
    >>> pool = Mpi(scheduler=torque)
    >>>
    >>> # do a blocking map on the chosen function
    >>> print (pool.map(pow, [1,2,3,4], [5,6,7,8]))

Several common configurations are available as pre-configured maps.
The following is identical to the above example:

    >>> # instantiate and configure a pre-configured worker pool
    >>> from pyina.launchers import TorqueMpiPool
    >>> config = {'nodes':'32:ppn=4', 'queue':'dedicated', 'timelimit':'11:59'}
    >>> pool = TorqueMpiPool(**config)
    >>>
    >>> # do a blocking map on the chosen function
    >>> print (pool.map(pow, [1,2,3,4], [5,6,7,8]))


Notes
=====

See pyina.launchers and pyina.schedulers for more launchers and schedulers.
""" __all__ = ['_save', '_debug', 'Mapper', 'world'] ##### shortcuts ##### from mpi4py import MPI world = MPI.COMM_WORLD # (also: world.rank, world.size) import dill try: getattr(MPI,'pickle',getattr(MPI,'_p_pickle',None)).dumps = dill.dumps getattr(MPI,'pickle',getattr(MPI,'_p_pickle',None)).loads = dill.loads except AttributeError: pass ##################### from subprocess import Popen, call from pathos.abstract_launcher import AbstractWorkerPool from pathos.helpers import cpu_count import os, os.path, sys import tempfile from dill.temp import dump, dump_source from pyina.tools import which_python, which_mpirun, which_strategy _HOLD = [] _SAVE = [False] import logging log = logging.getLogger("mpi") log.addHandler(logging.StreamHandler()) def _save(boolean): """if True, save temporary files after pickling; useful for debugging""" if boolean: _SAVE[0] = True else: _SAVE[0] = False _HOLD = [] return def _debug(boolean): """if True, print debuging info and save temporary files after pickling""" if boolean: log.setLevel(logging.DEBUG) _save(True) else: log.setLevel(logging.WARN) _save(False) return _pid = '.' + str(os.getpid()) + '.' defaults = { 'nodes' : str(cpu_count()), 'program' : which_strategy(lazy=True) or 'ezscatter', # serialize to tempfile 'mpirun' : which_mpirun() or 'mpiexec', 'python' : which_python(lazy=True) or 'python', 'progargs' : '', 'outfile' : 'results%sout' % _pid, 'errfile' : 'errors%sout' % _pid, 'jobfile' : 'job%sid' % _pid, 'scheduler' : '', 'timelimit' : '00:02', 'queue' : 'normal', 'workdir' : '.' } #FIXME FIXME: __init__ and self for 'nodes' vs 'ncpus' is confused; see __repr__ class Mapper(AbstractWorkerPool): """ Mapper base class for pipe-based mapping. """ def __init__(self, *args, **kwds): """\nNOTE: if number of nodes is not given, will default to 1. If source is not given, will attempt to minimially use TemporaryFiles. If workdir is not given, will default to scheduler's workdir or $WORKDIR. 
If scheduler is not given, will default to only run on the current node. If timeout is not given, will default to scheduler's timelimit or INF. For more details, see the docstrings for the "map" method, or the man page for the associated launcher (e.g mpirun, mpiexec). """ AbstractWorkerPool.__init__(self, *args, **kwds) self.scheduler = kwds.get('scheduler', None) self.scatter = True #bool(kwds.get('scatter', True)) self.source = bool(kwds.get('source', False)) self.workdir = kwds.get('workdir', None) self.timeout = kwds.get('timeout', None) if self.timeout == None: if self.scheduler: from pyina.tools import isoseconds self.timeout = isoseconds(self.scheduler.timelimit) else: from numpy import inf self.timeout = inf #XXX: better than defaults.timelimit ? elif isinstance(self.timeout, str): from pyina.tools import isoseconds self.timeout = isoseconds(self.timeout) if self.workdir == None: if self.scheduler: self.workdir = self.scheduler.workdir else: self.workdir = os.environ.get('WORKDIR', os.path.curdir) self.workdir = os.path.abspath(self.workdir) return if AbstractWorkerPool.__init__.__doc__: __init__.__doc__ = AbstractWorkerPool.__init__.__doc__ + __init__.__doc__ def __settings(self): """apply default settings, then update with given settings""" env = defaults.copy() [env.update({k:v}) for (k,v) in self.__dict__.items() if k in defaults] [env.update({'nodes':v}) for (k,v) in self.__dict__.items() if k.endswith('nodes')] # deal with self.__nodes return env def __launch(self, command): """launch mechanism for prepared launch command""" executable = command.split("|")[-1].split()[0] from pox import which if not which(executable): raise IOError("launch failed: %s not found" % executable) return Popen([command], shell=True) #FIXME: shell=True is insecure def _launcher(self, kdict={}): """prepare launch command based on current settings equivalent to: NotImplemented """ mydict = self.settings.copy() mydict.update(kdict) str = "launch command missing" % mydict 
return str def _pickleargs(self, args, kwds): """pickle.dump args and kwds to tempfile""" # standard pickle.dump of inputs to a NamedTemporaryFile return dump((args, kwds), suffix='.arg', dir=self.workdir) def _modularize(self, func): """pickle.dump function to tempfile""" if not self.source: # standard pickle.dump of inputs to a NamedTemporaryFile return dump(func, suffix='.pik', dir=self.workdir) # write func source to a NamedTemporaryFile (instead of pickle.dump) # ez*.py requires 'FUNC = ' to be included as module.FUNC return dump_source(func, alias='FUNC', dir=self.workdir) def _modulenamemangle(self, modfilename): """mangle modulename string for use by mapper""" if not self.source: return modfilename return os.path.splitext(os.path.basename(modfilename))[0] def _save_in(self, *args): """save input tempfiles - path to pickled function source (e.g. 'my_func.py or 'my_func.pik') - path to pickled function inputs (e.g. 'my_args.arg') """ # should check 'if modfilename' and 'if argfilename' modfilename = args[0] argfilename = args[1] modext = os.path.splitext(os.path.basename(modfilename))[-1] argext = os.path.splitext(os.path.basename(argfilename))[-1] # getsource; FUNC call('cp -f %s modfile%s' % (modfilename, modext), shell=True) # pickled inputs call('cp -f %s argfile%s' % (argfilename, argext), shell=True) return def _save_out(self, *args): """save output tempfiles - path to pickled function output (e.g. 'my_results') """ # should check 'if resfilename' resfilename = args[0] resext = os.path.splitext(os.path.basename(resfilename))[-1] # pickled output call('cp -f %s resfile%s' % (resfilename, resext), shell=True) return def _cleanup(self, *args): """clean-up any additional tempfiles - path to pickled function output (e.g. 'my_results') - path to pickled function source (e.g. 'my_func.py or 'my_func.pik') - path to pickled function inputs (e.g. 
'my_args.arg') """ resfilename = args[0] call('rm -f %s' % resfilename, shell=True) if not self.source: # do nothing return modfilename = args[1] argfilename = args[2] call('rm -f %sc' % modfilename, shell=True) return def map(self, func, *args, **kwds): """ The function 'func', it's arguments, and the results of the map are all stored and shipped across communicators as pickled strings. Optional Keyword Arguments: - onall = if True, include master as a worker [default: True] NOTE: 'onall' defaults to True for both the scatter-gather and the worker pool strategies. A worker pool with onall=True may have added difficulty in pickling functions, due to asynchronous message passing with itself. Additional keyword arguments are passed to 'func' along with 'args'. """ # set strategy if self.scatter: kwds['onall'] = kwds.get('onall', True) else: kwds['onall'] = kwds.get('onall', True) #XXX: has pickling issues config = {} config['program'] = which_strategy(self.scatter, lazy=True) # serialize function and arguments to files modfile = self._modularize(func) argfile = self._pickleargs(args, kwds) # Keep the above handles as long as you want the tempfiles to exist if _SAVE[0]: _HOLD.append(modfile) _HOLD.append(argfile) # create an empty results file resfilename = tempfile.mktemp(dir=self.workdir) # process the module name modname = self._modulenamemangle(modfile.name) # build the launcher's argument string config['progargs'] = ' '.join([modname, argfile.name, \ resfilename, self.workdir]) #XXX: better with or w/o scheduler baked into command ? #XXX: better... if self.scheduler: self.scheduler.submit(command) ? #XXX: better if self.__launch modifies command to include scheduler ? 
if _SAVE[0]: self._save_in(modfile.name, argfile.name) # func, pickled input # create any necessary job files if self.scheduler: config.update(self.scheduler._prepare()) ###################################################################### # build the launcher command command = self._launcher(config) log.info('(skipping): %s' % command) if log.level == logging.DEBUG: error = False res = [] else: try: subproc = self.__launch(command) # sumbit the jobs #print "after __launch" #pid = subproc.pid # get process id error = subproc.wait() # block until all done ## just to be sure... here's a loop to wait for results file ## maxcount = self.timeout; counter = 0 #print "before wait" while not os.path.exists(resfilename): call('sync', shell=True) from time import sleep sleep(1); counter += 1 if counter >= maxcount: print("Warning: exceeded timeout (%s s)" % maxcount) break #print "after wait" # read result back res = dill.load(open(resfilename,'rb')) #print "got result" except: error = True #print "got error" ###################################################################### # cleanup files if _SAVE[0]: if log.level == logging.WARN: self._save_out(resfilename) # pickled output else: modfile.close(); argfile.close() # pypy removes closed tempfiles if modfile in _HOLD: _HOLD.remove(modfile) if argfile in _HOLD: _HOLD.remove(argfile) self._cleanup(resfilename, modfile.name, argfile.name) if self.scheduler and not _SAVE[0]: self.scheduler._cleanup() if error: raise IOError("launch failed: %s" % command) return res #def imap(self, func, *args, **kwds): # """'non-blocking' and 'ordered' # """ # return #def uimap(self, func, *args, **kwds): # """'non-blocking' and 'unordered' # """ # return #def amap(self, func, *args, **kwds): # """'asynchronous' map(); use "get()" to retrieve results # """ # return def __repr__(self): if self.scheduler: scheduler = self.scheduler.__class__.__name__ else: scheduler = "None" mapargs = (self.__class__.__name__, self.nodes, scheduler) return "" 
% mapargs # interface settings = property(__settings) #XXX: set? pass # EOF uqfoundation-pyina-c629452/pyina/mpi_pool.py000066400000000000000000000136251467660040300211510ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE from mpi4py import MPI as mpi import dill try: getattr(mpi,'pickle',getattr(mpi,'_p_pickle',None)).dumps = dill.dumps getattr(mpi,'pickle',getattr(mpi,'_p_pickle',None)).loads = dill.loads except AttributeError: pass from pyina.tools import lookup from pathos.helpers import ProcessPool as MPool master = 0 comm = mpi.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() any_source = mpi.ANY_SOURCE any_tag = mpi.ANY_TAG EXITTAG = 0 __SKIP = [True] import logging log = logging.getLogger("mpi_pool") log.addHandler(logging.StreamHandler()) def _debug(boolean): """print debug statements""" if boolean: log.setLevel(logging.DEBUG) else: log.setLevel(logging.WARN) return def __queue(*inputs): "iterator that groups inputs by index (i.e. 
[(x[0], a[0]),(x[1], a[1])])" return zip(*inputs) def __index(*inputs): """build an index iterator for the given inputs""" NJOBS = len(inputs[0]) return iter(range(NJOBS)) def parallel_map(func, *seq, **kwds): """the worker pool strategy for mpi""" skip = not bool(kwds.get('onall', True)) __SKIP[0] = skip NJOBS = len(seq[0]) nodes = size if size <= NJOBS+skip else NJOBS+skip # nodes <= NJOBS+(master) #queue = __queue(*seq) #XXX: passing the *data* queue = __index(*seq) #XXX: passing the *index* results = [''] * NJOBS if rank == master: log.info("size: %s, NJOBS: %s, nodes: %s, skip: %s" % (size, NJOBS, nodes, skip)) if nodes == 1: # the pool is just the master if skip: raise ValueError("There must be at least one worker node") return map(func, *seq) # spawn a separate process for jobs running on the master if not skip: pool = MPool(1) #XXX: poor pickling... use iSend/iRecv instead? #input = queue.next() #XXX: receiving the *data* input = lookup(seq, next(queue)) #XXX: receives an *index* log.info("MASTER SEND'ING(0)") mresult, mjobid = pool.apply_async(func, args=input), 0 # farm out to workers: 1-N for indexing, 0 reserved for termination for worker in range(1, nodes): #XXX: don't run on master... 
# master send next job to worker 'worker' with tag='worker' log.info("WORKER SEND'ING(%s)" % (worker-skip,)) comm.send(next(queue), worker, worker) # start receiving recvjob = 0; donejob = 0 sendjob = nodes while recvjob < NJOBS: # was: for job in range(NJOBS) log.info("--job(%s,%s)--" % (sendjob-skip, recvjob)) if recvjob < NJOBS and donejob < nodes-1: status = mpi.Status() # master receive jobs from any_source and any_tag log.info("RECV'ING FROM WORKER") message = comm.recv(source=any_source,tag=any_tag,status=status) sender = status.source anstag = status.tag if anstag: recvjob += 1 # don't count a 'donejob' results[anstag-skip] = message # store the received message log.info("WORKER(%s): %s" % (anstag-skip, message)) if (sendjob-skip < NJOBS): # then workers are not done # master send next job to worker 'sender' with tag='jobid' log.info("WORKER SEND'ING(%s)" % (sendjob-skip)) input = next(queue) comm.send(input, sender, sendjob) sendjob += 1 else: # workers are done # send the "exit" signal log.info("WORKER SEND'ING(DONE)") comm.send("done", sender, EXITTAG) donejob += 1 log.info("WORKER LOOP DONE") # check if the master is done log.info("--job(%s,%s)--" % (sendjob-skip, recvjob)) if not skip and mresult.ready(): log.info("RECV'ING FROM MASTER") results[mjobid] = mresult.get() log.info("MASTER(%s): %s" % (mjobid, results[mjobid])) recvjob += 1 if (sendjob < NJOBS): log.info("MASTER SEND'ING(%s)" % sendjob) #input = queue.next() #XXX: receiving the *data* input = lookup(seq, next(queue)) #XXX: receives an *index* mresult, mjobid = pool.apply_async(func, args=input),sendjob sendjob += 1 else: mresult.ready = lambda : False log.info("MASTER LOOP DONE") log.info("WE ARE EXITING") if not skip: pool.close() pool.join() elif (nodes != size) and (rank >= nodes): # then skip this node... 
pass else: # then this is a worker node while True: # receive jobs from master @ any_tag status = mpi.Status() message = comm.recv(source=master, tag=any_tag, status=status) tag = status.tag if tag == EXITTAG: # worker is done break # worker evaluates received job #result = func(*message) #XXX: receiving the *data* result = func(*lookup(seq, message)) #XXX: receives an *index* # send result back to master comm.send(result, master, tag) #XXX: or write to results then merge? comm.barrier() return results if __name__ == '__main__': _debug(False) def squared(x): return x**2 x = range(10) y = parallel_map(squared, x)#, onall=False) if rank == master: print(("f: %s" % squared.__name__)) print(("x: %s" % x)) print(("y: %s" % y)) # EOF uqfoundation-pyina-c629452/pyina/mpi_scatter.py000066400000000000000000000072741467660040300216500ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE from mpi4py import MPI as mpi import dill try: getattr(mpi,'pickle',getattr(mpi,'_p_pickle',None)).dumps = dill.dumps getattr(mpi,'pickle',getattr(mpi,'_p_pickle',None)).loads = dill.loads except AttributeError: pass from pyina.tools import get_workload, balance_workload, lookup master = 0 comm = mpi.COMM_WORLD size = comm.Get_size() rank = comm.Get_rank() any_source = mpi.ANY_SOURCE any_tag = mpi.ANY_TAG EXITTAG = 0 __SKIP = [None] def __queue(*inputs): "iterator that groups inputs by index (i.e. 
[(x[0], a[0]),(x[1], a[1])])" #NJOBS = len(inputs[0]) #return (lookup(inputs, *get_workload(i, size, NJOBS, skip=__SKIP[0])) for i in range(size)) load = __index(*inputs) return (lookup(inputs, next(load)) for i in range(size)) def __index(*inputs): """build an index iterator for the given inputs""" NJOBS = len(inputs[0]) return (get_workload(i, size, NJOBS, skip=__SKIP[0]) for i in range(size)) #return izip(*balance_workload(size, NJOBS, skip=__SKIP[0])) def parallel_map(func, *seq, **kwds): """the scatter-gather strategy for mpi""" skip = not bool(kwds.get('onall', True)) if skip is False: skip = None else: if size == 1: raise ValueError("There must be at least one worker node") skip = master __SKIP[0] = skip NJOBS = len(seq[0]) # queue = __queue(*seq) #XXX: passing the *data* queue = __index(*seq) #XXX: passing the *index* results = [''] * NJOBS if rank == master: # each processor needs to do its set of jobs. message = next(queue) # send jobs to workers for worker in range(1, size): # master sending seq[ib:ie] to worker 'worker' comm.send(next(queue), worker, 0) else: # worker 'rank' receiving job status = mpi.Status() message = comm.recv(source=master, tag=any_tag, status=status) # message received; no need to parse tags # now message is the part of seq that each worker has to do # result = map(func, *message) #XXX: receiving the *data* result = list(map(func, *lookup(seq, *message))) #XXX: receives an *index* if rank == master: _b, _e = get_workload(rank, size, NJOBS, skip=skip) #_b, _e = balance_workload(size, NJOBS, rank, skip=skip) results[_b:_e] = result[:] # at this point, all nodes must sent to master if rank != master: # worker 'rank' sending answer to master comm.send(result, master, rank) else: # master needs to receive once for each worker for worker in range(1, size): # master listening for worker status = mpi.Status() message = comm.recv(source=any_source, tag=any_tag, status=status) sender = status.source #anstag = status.tag # master received 
answer from worker 'sender' ib, ie = get_workload(sender, size, NJOBS, skip=skip) #ib, ie = balance_workload(size, NJOBS, sender, skip=skip) results[ib:ie] = message # master received results[ib:ie] from worker 'sender' #comm.barrier() return results if __name__ == '__main__': def squared(x): return x**2 x = range(10) y = parallel_map(squared, x)#, onall=False) if rank == master: print("f: %s" % squared.__name__) print("x: %s" % x) print("y: %s" % y) # EOF uqfoundation-pyina-c629452/pyina/schedulers.py000077500000000000000000000316271467660040300215010ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE """ This module contains bindings to some common schedulers. Base classes: Scheduler - base class for cpu cluster scheduling Schedulers: Torque - Moab - Lsf - Usage ===== A typical call to a pyina mpi map will roughly follow this example: >>> # instantiate and configure a scheduler >>> from pyina.schedulers import Torque >>> config = {'nodes'='32:ppn=4', 'queue':'dedicated', 'timelimit':'11:59'} >>> torque = Torque(**config) >>> >>> # instantiate and configure a worker pool >>> from pyina.mpi import Mpi >>> pool = Mpi(scheduler=torque) >>> >>> # do a blocking map on the chosen function >>> results = pool.map(pow, [1,2,3,4], [5,6,7,8]) Notes ===== The schedulers provided here are built through pipes and not direct bindings, and are currently somewhat limited on inspecting the status of a submitted job and killing a submitted job. Currently, the use of pre-built scheduler job files are also not supported. 
""" """ tiny function wrapper to provide ez_map interface with schedulers provides: scheduler_obj = scheduler() interface """ __all__ = ['Scheduler', 'Torque', 'Moab', 'Lsf'] from pyina.mpi import defaults from subprocess import Popen, call import os, os.path import tempfile import dill as pickle import logging log = logging.getLogger("schedulers") log.addHandler(logging.StreamHandler()) class Scheduler(object): """ Scheduler base class for cpu cluster scheduling. """ __nodes = 1 def __init__(self, *args, **kwds): """ Important class members: nodes - number (and potentially description) of workers queue - name of the scheduler queue [default: 'normal'] timelimit - upper limit of clocktime for each scheduled job workdir - associated $WORKDIR for scratch calculations/files Other class members: jobfile - name of the 'job' file pyina.mpi builds for the scheduler outfile - name of the 'output' file the scheduler will write to errfile - name of the 'error' file the scheduler will write to NOTE: The format for timelimit is typically 'HH:MM' or 'HH:MM:SS', while the format for nodes is typically 'n' or some variant of 'n:ppn=m' where 'n' is number of nodes and 'm' is processors per node. For more details, see the docstrings for the "sumbit" method, or the man page for the associated scheduler. 
""" self.__init(*args, **kwds) self.timelimit = kwds.get('timelimit', defaults['timelimit']) from numbers import Integral if isinstance(self.timelimit, Integral): from pyina.tools import isoformat self.timelimit = isoformat(self.timelimit) self.queue = kwds.get('queue', defaults['queue']) self.workdir = kwds.get('workdir', os.environ.get('WORKDIR', os.path.curdir)) #self.workdir = kwds.get('workdir', os.environ.get('WORKDIR', os.path.expanduser("~")) self.workdir = os.path.abspath(self.workdir) self.jobfile = kwds.get('jobfile', defaults['jobfile']) self.outfile = kwds.get('outfile', defaults['outfile']) self.errfile = kwds.get('errfile', defaults['errfile']) #self.nodes = kwds.get('nodes', defaults['nodes']) return def __init(self, *args, **kwds): """default filter for __init__ inputs """ # allow default arg for 'nodes', but not if in kwds if len(args): try: nodes = kwds['nodes'] msg = "got multiple values for keyword argument 'nodes'" raise TypeError(msg) except KeyError: nodes = args[0] else: nodes = kwds.get('nodes', self.__nodes) try: self.nodes = nodes except TypeError: pass # then self.nodes is read-only return def __settings(self): """fetch the settings for the map (from defaults and self.__dict__)""" env = defaults.copy() [env.update({k:v}) for (k,v) in self.__dict__.items() if k in defaults] [env.update({'nodes':v}) for (k,v) in self.__dict__.items() if k.endswith('nodes')] # deal with self.__nodes return env def _prepare(self): """prepare the scheduler files (jobfile, outfile, and errfile)""" pid = '.' + str(os.getpid()) + '.' 
jobfilename = tempfile.mktemp(prefix='tmpjob'+pid, dir=self.workdir) outfilename = tempfile.mktemp(prefix='tmpout'+pid, dir=self.workdir) errfilename = tempfile.mktemp(prefix='tmperr'+pid, dir=self.workdir) self.jobfile = jobfilename self.outfile = outfilename self.errfile = errfilename d = {'jobfile':jobfilename,'outfile':outfilename,'errfile':errfilename} return d def _cleanup(self): """clean-up scheduler files (jobfile, outfile, and errfile)""" call('rm -f %s' % self.jobfile, shell=True) call('rm -f %s' % self.outfile, shell=True) call('rm -f %s' % self.errfile, shell=True) #print "called scheduler cleanup" return def fetch(self, outfile, subproc=None): #FIXME: call fetch after submit??? """fetch result from the results file""" try: error = subproc.wait() # block until all done res = pickle.load(open(outfile,'rb')) except: error = True if error: raise IOError("fetch failed: %s" % outfile) return res def _submit(self, command, kdict={}): """prepare the given command for the scheduler equivalent to: (command) """ mydict = self.settings.copy() mydict.update(kdict) str = command #% mydict return str def submit(self, command): self._prepare() command = self._submit(command) log.info('(skipping): %s' % command) if log.level != logging.DEBUG: subproc = self.__launch(command) #pid = subproc.pid error = subproc.wait() # block until all done if error: raise IOError("launch failed: %s" % command) return error #self._cleanup() return submit.__doc__ = _submit.__doc__.replace('prepare','submit').replace('command for','command to') #XXX: hacky def __launch(self, command): """launch mechanism for prepared launch command""" executable = command.split("|")[-1].split()[0] from pox.shutils import which if not which(executable): raise IOError("launch failed: %s not found" % executable) return Popen([command], shell=True) #FIXME: shell=True is insecure def __repr__(self): subargs = (self.__class__.__name__, self.nodes, self.timelimit, self.queue) return "" % subargs # interface 
settings = property(__settings) #XXX: set? pass class Torque(Scheduler): """ Scheduler that leverages the torque scheduler. """ def _submit(self, command, kdict={}): """prepare the given command for submission with qsub equivalent to: echo \"(command)\" | qsub -l nodes=(nodes) -l walltime=(timelimit) -o (outfile) -e (errfile) -q (queue) NOTES: run non-python commands with: {'python':'', ...} fine-grained resource utilization with: {'nodes':'4:nodetype:ppn=1', ...} """ mydict = self.settings.copy() mydict.update(kdict) str = """ echo \"""" + command + """\" | """ str += """qsub -l nodes=%(nodes)s -l walltime=%(timelimit)s -o %(outfile)s -e %(errfile)s -q %(queue)s &> %(jobfile)s""" % mydict return str def submit(self, command): Scheduler.submit(self, command) return submit.__doc__ = _submit.__doc__.replace('prepare','submit').replace('for submission','') #XXX: hacky pass class Moab(Scheduler): """ Scheduler that leverages the moab scheduler. """ def _submit(self, command, kdict={}): """prepare the given command for submission with msub ` equivalent to: echo \"(command)\" | msub -l nodes=(nodes) -l walltime=(timelimit) -o (outfile) -e (errfile) -q (queue) NOTES: run non-python commands with: {'python':'', ...} fine-grained resource utilization with: {'nodes':'4:ppn=1,partition=xx', ...} """ mydict = self.settings.copy() mydict.update(kdict) str = """ echo \"""" + command + """\" | """ str += """msub -l nodes=%(nodes)s -l walltime=%(timelimit)s -o %(outfile)s -e %(errfile)s -q %(queue)s &> %(jobfile)s""" % mydict return str def submit(self, command): Scheduler.submit(self, command) return submit.__doc__ = _submit.__doc__.replace('prepare','submit').replace('for submission','') #XXX: hacky pass class Lsf(Scheduler): """ Scheduler that leverages the lsf scheduler. 
""" def __init__(self, *args, **kwds): Scheduler.__init__(self, *args, **kwds) mpich = kwds.get('mpich', '') # required for mpich_gm and mpich_mx if mpich in ['gm', 'mpich_gm', 'mpich-gm']: mpich = 'gm' elif mpich in ['mx', 'mpich_mx', 'mpich-mx']: mpich = 'mx' self.mpich = mpich return __init__.__doc__ = Scheduler.__init__.__doc__ def _submit(self, command, kdict={}): """prepare the given command for submission with bsub equivalent to: bsub -K -W (timelimit) -n (nodes) -o (outfile) -e (errfile) -q (queue) -J (progname) "(command)" NOTES: if mpich='mx', uses "-a mpich_mx mpich_mx_wrapper" instead of given launcher if mpich='gm', uses "-a mpich_gm gmmpirun_wrapper" instead of given launcher run non-python commands with: {'python':'', ...} """ mydict = self.settings.copy() # DISCOVER THE CALLER #import traceback #stack = traceback.extract_stack() #caller = stack[ -min(len(stack),2) ][0] #mydict['program'] = caller caller = mydict['program'] progname = os.path.basename(caller) mydict['progname'] = progname.lstrip('`which ').rstrip('`') mydict.update(kdict) #str = """ bsub -K -W %(timelimit)s -n %(nodes)s -o ./%%J.out -e %(errfile)s -q %(queue)s -J %(progname)s -a mpich_gm gmmpirun_wrapper %(python)s %(program)s %(progargs)s &> %(jobfile)s""" % mydict #str = """ bsub -K -W %(timelimit)s -n %(nodes)s -o %(outfile)s -e %(errfile)s -q %(queue)s -J %(progname)s -a mpich_gm gmmpirun_wrapper %(python)s %(program)s %(progargs)s &> %(jobfile)s""" % mydict #str = """ echo \"""" + command + """\" | """ #str += """bsub -K -W %(timelimit)s -n %(nodes)s -o %(outfile)s -e %(errfile)s -q %(queue)s -J %(progname)s %(esubapp) &> %(jobfile)s""" % mydict def _get_comm(comm): t = [] # should never return empty... 
s = comm.split()[1:] # strip off the launcher for x in s: if x.startswith('-') or x.isdigit(): continue t = s[s.index(x):] # don't want -n %(nodes)s either break return ' '.join(t) if self.mpich == 'gm': mydict['command'] = 'gmmpirun_wrapper ' + _get_comm(command) mydict['esubapp'] = "-a mpich_gm" elif self.mpich == 'mx': mydict['command'] = 'mpich_mx_wrapper ' + _get_comm(command) mydict['esubapp'] = "-a mpich_mx" else: mydict['command'] = command #'"' + command + '"' mydict['esubapp'] = "" str = """bsub -K -W %(timelimit)s -n %(nodes)s -o %(outfile)s -e %(errfile)s -q %(queue)s -J %(progname)s %(esubapp)s %(command)s &> %(jobfile)s""" % mydict return str def submit(self, command): Scheduler.submit(self, command) return submit.__doc__ = _submit.__doc__.replace('prepare','submit').replace('for submission','') #XXX: hacky pass # some references for bsub and mpich_*: # http://www.cisl.ucar.edu/docs/LSF/7.0.3/command_reference/bsub.cmdref.html # http://www.mun.ca/hpc/lsf/examples.html # http://its2.unc.edu/dci_components/lsf/mpich_parallel.htm # http://ait.web.psi.ch/services/linux/hpc/mpich/using_mpich_gm.html # backward compatibility class torque_scheduler(object): """torque scheduler -- configured for mpirun, srun, aprun, or serial""" mpirun = "torque_mpirun" srun = "torque_srun" aprun = "torque_aprun" serial = "torque_serial" pass class moab_scheduler(object): """moab scheduler -- configured for mpirun, srun, aprun, or serial""" mpirun = "moab_mpirun" srun = "moab_srun" aprun = "moab_aprun" serial = "moab_serial" pass def all_schedulers(): import pyina.schedulers as schedulers L = ["schedulers.%s" % f for f in dir(schedulers) if f[-9:] == "scheduler"] return L if __name__=='__main__': print(all_schedulers()) # EOF 
uqfoundation-pyina-c629452/pyina/tests/000077500000000000000000000000001467660040300201145ustar00rootroot00000000000000uqfoundation-pyina-c629452/pyina/tests/__init__.py000066400000000000000000000007421467660040300222300ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2018-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE """ to run this test suite, first build and install `pyina`. $ python -m pip install ../.. then run the tests with: $ python -m pyina.tests or, if `nose` is installed: $ nosetests """ uqfoundation-pyina-c629452/pyina/tests/__main__.py000066400000000000000000000016041467660040300222070ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2018-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE import glob import os import sys import subprocess as sp python = sys.executable try: import pox python = pox.which_python(version=True) or python except ImportError: pass shell = sys.platform[:3] == 'win' suite = os.path.dirname(__file__) or os.path.curdir tests = glob.glob(suite + os.path.sep + 'test_*.py') if __name__ == '__main__': failed = 0 for test in tests: p = sp.Popen([python, test], shell=shell).wait() if p: print('F', end='', flush=True) failed = 1 else: print('.', end='', flush=True) print('') exit(failed) uqfoundation-pyina-c629452/pyina/tests/test_ezmap.py000066400000000000000000000050551467660040300226460ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. 
# License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE # old-style maps (deprecated) import time x = range(18) delay = 0.01 items = 20 def busy_add(x,y, delay=0.01): for n in range(x): x += n for n in range(y): y -= n import time time.sleep(delay) return x + y def busy_squared(x): import time, random time.sleep(0.01*random.random()) return x*x def squared(x): return x*x def quad_factory(a=1, b=1, c=0): def quad(x): return a*x**2 + b*x + c return quad square_plus_one = quad_factory(2,0,1) x2 = list(map(squared, x)) def check_sanity(_map, nodes, verbose=False): if verbose: print(_map) print(("x: %s\n" % str(x))) print((type, _map.__name__)) _config = {'type':"blocking", 'threads':False, 'nproc':nodes, 'ncpus':nodes} mapconfig = {'nodes':nodes} start = time.time() res = _map(squared, x, **mapconfig) end = time.time() - start if verbose: print(( "time to results:", end)) print(( "y: %s\n" % str(res))) assert res == x2 mapconfig.update(_config) res = _map(squared, x, **mapconfig) assert res == x2 mapconfig.update({'program':'hostname','workdir':'.','file':''}) res = _map(squared, x, **mapconfig) assert res == x2 from pyina.mappers import worker_pool mapconfig.update({'mapper':worker_pool,'timelimit':'00:00:02'}) res = _map(squared, x, **mapconfig) assert res == x2 def check_maps(_map, nodes, items=4, delay=0 ): _x = range(int(-items/2), int(items/2),2) _y = range(len(_x)) _d = [delay]*len(_x) _z = [0]*len(_x) #print map res1 = list(map(busy_squared, _x)) mapconfig = {'nodes':nodes} #print _map _res1 = _map(busy_squared, _x, **mapconfig) assert _res1 == res1 res2 = list(map(busy_add, _x, _y, _d)) _res2 = _map(busy_add, _x, _y, _d, **mapconfig) assert _res2 == res2 #print "" def test_ezmap(): from pyina.ez_map import ez_map as _map nodes=4 check_sanity( _map, nodes ) check_maps( _map, nodes, items=items ) def test_ezmap2(): from pyina.ez_map import ez_map2 as _map nodes=4 check_sanity( _map, nodes ) 
check_maps( _map, nodes, items=items ) if __name__ == '__main__': test_ezmap() test_ezmap2() uqfoundation-pyina-c629452/pyina/tests/test_map.py000066400000000000000000000042241467660040300223040ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE verbose = False delay = 0.01 items = 100 def busy_add(x,y, delay=0.01): for n in range(x): x += n for n in range(y): y -= n import time time.sleep(delay) return x + y def timed_pool(pool, items=100, delay=0.1, verbose=False): _x = range(int(-items/2), int(items/2), 2) _y = range(len(_x)) _d = [delay]*len(_x) if verbose: print( pool) import time start = time.time() res = pool.map(busy_add, _x, _y, _d) _t = time.time() - start if verbose: print(("time to queue:", _t)) start = time.time() _sol_ = list(res) t_ = time.time() - start if verbose: print(("time to results:", t_, "\n")) return _sol_ class BuiltinPool(object): def map(self, *args): return map(*args) std = timed_pool(BuiltinPool(), items, delay=0, verbose=False) def check_serial(source=False): from pyina.launchers import SerialMapper as S pool = S(source=source) res = timed_pool(pool, items, delay, verbose) assert res == std def check_pool(source=False): from pyina.launchers import MpiPool as MPI pool = MPI(4, source=source) res = timed_pool(pool, items, delay, verbose) assert res == std def check_scatter(source=False): from pyina.launchers import MpiScatter as MPI pool = MPI(4, source=source) res = timed_pool(pool, items, delay, verbose) assert res == std def test_nosource(): check_serial() check_pool() check_scatter() def test_source(): check_serial(source=True) check_pool(source=True) check_scatter(source=True) if __name__ == '__main__': from pyina.mpi 
import _debug, _save #_save(True) #_debug(True) if verbose: print(("CONFIG: delay = %s" % delay)) print(("CONFIG: items = %s" % items)) print("") test_nosource() test_source() uqfoundation-pyina-c629452/pyina/tests/test_pool.py000066400000000000000000000027721467660040300225060ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE from dill import source, temp def run_source(obj): _obj = source._wrap(obj) assert _obj(1.57) == obj(1.57) src = source.importable(obj, alias='_f') # LEEK: for 3.x, locals may not be modified # (see https://docs.python.org/3.6/library/functions.html#locals) # my_locals = locals() exec(src, globals(), my_locals) assert my_locals["_f"](1.57) == obj(1.57) name = source.getname(obj) assert name == obj.__name__ or src.split("=",1)[0].strip() def run_files(obj): f = temp.dump_source(obj, alias='_obj') _obj = temp.load_source(f) assert _obj(1.57) == obj(1.57) def run_pool(obj): from pyina.launchers import Mpi p = Mpi(2) x = [1,2,3] y = list(map(obj, x)) p.scatter = False assert p.map(obj, x) == y p.source = True assert p.map(obj, x) == y p.scatter = True assert p.map(obj, x) == y p.source = False assert p.map(obj, x) == y def test_pyina(): from math import sin f = lambda x:x+1 def g(x): return x+2 for func in [g, f, abs, sin]: run_source(func) run_files(func) run_pool(func) if __name__ == '__main__': test_pyina() uqfoundation-pyina-c629452/pyina/tests/test_simple.py000066400000000000000000000015031467660040300230150ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. 
# Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE # construct a target function def host(id): import socket return "Rank: %d -- %s" % (id, socket.gethostname()) def test_equal(): # get the parallel mapper from pyina.ez_map import ez_map from pyina.ez_map import ez_map2 # launch the parallel map of the target function results = ez_map(host, range(10), nodes=4) results2 = ez_map2(host, range(10), nodes=4) assert "\n".join(results) == "\n".join(results2) if __name__ == '__main__': test_equal() uqfoundation-pyina-c629452/pyina/tests/test_star.py000066400000000000000000000102741467660040300225020ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE import time x = range(18) delay = 0.01 items = 20 maxtries = 20 def busy_add(x,y, delay=0.01): for n in range(x): x += n for n in range(y): y -= n import time time.sleep(delay) return x + y def busy_squared(x): import time, random time.sleep(0.01*random.random()) return x*x def squared(x): return x*x def quad_factory(a=1, b=1, c=0): def quad(x): return a*x**2 + b*x + c return quad square_plus_one = quad_factory(2,0,1) x2 = list(map(squared, x)) def check_sanity(pool, verbose=False): if verbose: print(pool) print("x: %s\n" % str(x)) print(pool.map.__name__) # blocking map start = time.time() res = pool.map(squared, x) end = time.time() - start assert res == x2 if verbose: print("time to results:", end) print("y: %s\n" % str(res)) # print pool.imap.__name__ # iterative map #start = time.time() #res = pool.imap(squared, x) #fin = time.time() - start # get result from iterator #start = time.time() #res = list(res) #end = time.time() - start #assert res == x2 #if verbose: # print "time to queue:", fin # print "time to results:", end # print "y: %s\n" % str(res) # print pool.amap.__name__ # asyncronous map #start = time.time() #res = pool.amap(squared, x) #fin = time.time() - start # get result from result object #start = time.time() #res = res.get() #end = time.time() - start #assert res == x2 #if verbose: # print "time to queue:", fin # print "time to results:", end # print "y: %s\n" % str(res) def check_maps(pool, items=4, delay=0): _x = range(int(-items/2),int(items/2),2) _y = range(len(_x)) _d = [delay]*len(_x) _z = [0]*len(_x) #print map res1 = list(map(squared, _x)) res2 = list(map(busy_add, _x, _y, _z)) #print pool.map _res1 = pool.map(squared, _x) _res2 = pool.map(busy_add, _x, _y, _d) assert _res1 == res1 assert _res2 == res2 #print pool.imap #_res1 = pool.imap(squared, _x) #_res2 = pool.imap(busy_add, _x, _y, _d) #assert list(_res1) == res1 #assert list(_res2) 
== res2 #print pool.uimap #_res1 = pool.uimap(squared, _x) #_res2 = pool.uimap(busy_add, _x, _y, _d) #assert sorted(_res1) == sorted(res1) #assert sorted(_res2) == sorted(res2) #print pool.amap #_res1 = pool.amap(squared, _x) #_res2 = pool.amap(busy_add, _x, _y, _d) #assert _res1.get() == res1 #assert _res2.get() == res2 #print "" def check_dill(pool, verbose=False): # test function that should fail in pickle if verbose: print(pool) print("x: %s\n" % str(x)) print(pool.map.__name__) #start = time.time() try: res = pool.map(square_plus_one, x) except: assert False # should use a smarter test here... #end = time.time() - start # print "time to results:", end print("y: %s\n" % str(res)) assert True def check_ready(pool, maxtries, delay, verbose=True): if verbose: print(pool) m = pool.amap(busy_squared, x)# x) # print m.ready() # print m.wait(0) tries = 0 while not m.ready(): time.sleep(delay) tries += 1 if verbose: print("TRY: %s" % tries) if tries >= maxtries: if verbose: print("TIMEOUT") break #print m.ready() # print m.get(0) res = m.get() if verbose: print(res) z = [0]*len(x) assert res == map(squared, x)# x, z) assert tries > 0 assert maxtries > tries #should be True, may not be if CPU is SLOW def test_pool(): from pyina.launchers import MpiPool as Pool pool = Pool(nodes=4) check_sanity( pool ) check_maps( pool, items, delay ) check_dill( pool ) #check_ready( pool, maxtries, delay, verbose=False ) if __name__ == '__main__': test_pool() uqfoundation-pyina-c629452/pyina/tests/test_with.py000066400000000000000000000031411467660040300224770ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE from time import sleep PRIMES = [ 112272535095293, 112582705942171, 112272535095293, 115280095190773, 115797848077099, 1099726899285419] def is_prime(n): if n % 2 == 0: return False import math sqrt_n = int(math.floor(math.sqrt(n))) for i in range(3, sqrt_n + 1, 2): if n % i == 0: return False return True def sleep_add1(x): if x < 4: sleep(x/10.0) return x+1 def sleep_add2(x): if x < 4: sleep(x/10.0) return x+2 def run_with_multipool(Pool): #XXX: amap and imap -- NotImplementedError #inputs = range(10) #with Pool() as pool1: # res1 = pool1.amap(sleep_add1, inputs) #with Pool() as pool2: # res2 = pool2.amap(sleep_add2, inputs) with Pool() as pool3: #for number, prime in izip(PRIMES, pool3.imap(is_prime, PRIMES)): for number, prime in zip(PRIMES, pool3.map(is_prime, PRIMES)): assert prime if number != PRIMES[-1] else not prime #print ('%d is prime: %s' % (number, prime)) #assert res1.get() == [i+1 for i in inputs] #assert res2.get() == [i+2 for i in inputs] def test_with_mpipool(): from pyina.launchers import MpiPool run_with_multipool(MpiPool) if __name__ == '__main__': test_with_mpipool() uqfoundation-pyina-c629452/pyina/tools.py000077500000000000000000000177031467660040300204770ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE """ Various mpi python tools Main function exported are:: - ensure_mpi: make sure the script is called by mpi-enabled python - get_workload: get the workload the processor is responsible for """ def ensure_mpi(size = 1, doc = None): """ ensure that mpi-enabled python is being called with the appropriate size inputs: - size: minimum required size of the MPI world [default = 1] - doc: error string to throw if size restriction is violated """ if doc == None: doc = "Error: Requires MPI-enabled python with size >= %s" % size from pyina.mpi import world mpisize = world.Get_size() mpirank = world.Get_rank() if mpisize < size: if mpirank == 0: print(doc) import sys sys.exit() return def mpiprint(string="", end="\n", rank=0, comm=None): """print the given string to the given rank""" from pyina.mpi import world if comm is None: comm = world if not hasattr(rank, '__len__'): rank = (rank,) if comm.rank in rank: print((string+end,)) #XXX: has light load on *last* proc, heavy/equal on first proc from math import ceil def get_workload(index, nproc, popsize, skip=None): """returns the workload that this processor is responsible for index: int rank of node to calculate for nproc: int number of nodes popsize: int number of jobs skip: int rank of node upon which to not calculate (i.e. 
the master) returns (begin, end) index """ if skip is not None and skip < nproc: nproc = nproc - 1 if index == skip: skip = True elif index > skip: index = index - 1 n1 = nproc n2 = popsize iend = 0 for i in range(nproc): ibegin = iend ai = int( ceil( 1.0*n2/n1 )) n2 = n2 - ai n1 = n1 - 1 iend = iend + ai if i==index: break if skip is True: return (ibegin, ibegin) if (index < nproc) else (iend, iend) return (ibegin, iend) #XXX: (begin, end) index for a single element #FIXME: has light load on *last* proc, heavy/equal on master proc import numpy as np def balance_workload(nproc, popsize, *index, **kwds): """divide popsize elements on 'nproc' chunks nproc: int number of nodes popsize: int number of jobs index: int rank of node(s) to calculate for (using slice notation) skip: int rank of node upon which to not calculate (i.e. the master) returns (begin, end) index vectors""" _skip = False skip = kwds.get('skip', None) if skip is not None and skip < nproc: nproc = nproc - 1 _skip = True count = np.round(popsize/nproc) counts = count * np.ones(nproc, dtype=np.int) diff = popsize - count*nproc counts[:diff] += 1 begin = np.concatenate(([0], np.cumsum(counts)[:-1])) #return counts, index #XXX: (#jobs, begin index) for all elements if _skip: if skip == nproc: # remember: nproc has been reduced begin = np.append(begin, begin[-1]+counts[-1]) counts = np.append(counts, 0) else: begin = np.insert(begin, skip, begin[skip]) counts = np.insert(counts, skip, 0) if not index: return begin, begin+counts #XXX: (begin, end) index for all elements #if len(index) > 1: # return lookup((begin, begin+counts), *index) # index a slice return lookup((begin, begin+counts), *index) # index a single element def lookup(inputs, *index): """get tuple of inputs corresponding to the given index""" if len(index) == 1: index = index[0] else: index = slice(*index) return tuple(i.__getitem__(index) for i in inputs) def isoseconds(time): """calculate number of seconds from a given isoformat timestring""" 
from numbers import Integral if isinstance(time, Integral): return int(time) #XXX: allow this? import datetime d = 0 try: # allows seconds up to 59 #XXX: allow 60+ ? t = datetime.datetime.strptime(time, "%S").time() except ValueError: fmt = str(time).count(":") or 2 # get ValueError if no ":" if fmt == 1: t = datetime.datetime.strptime(time, "%H:%M").time() elif fmt == 3: # allows days (up to 31) t = datetime.datetime.strptime(time, "%d:%H:%M:%S") d,t = t.day, t.time() else: # maxtime is '23:59:59' #XXX: allow 24+ hours instead of days? t = datetime.datetime.strptime(time, "%H:%M:%S").time() return t.second + 60*t.minute + 3600*t.hour + d*86400 def isoformat(seconds): """generate an isoformat timestring for the given time in seconds""" import datetime d = seconds/86400 if d > 31: datetime.date(1900, 1, d) # throw ValueError h = (seconds - d*86400)/3600 m = (seconds - d*86400 - h*3600)/60 s = seconds - d*86400 - h*3600 - m*60 t = datetime.time(h,m,s).strftime("%H:%M:%S") return ("%s:" % d) + t if d else t #XXX: better convert days to hours? 
def which_mpirun(mpich=None, fullpath=False): """try to autodetect an available mpi launcher if mpich=True only look for mpich, if False only look for openmpi""" import os from pox import which progs = ['mpiexec', 'mpirun', 'mpiexec-mpich-mp', 'mpiexec-openmpi-mp', 'mpirun-mpich-mp', 'mpirun-openmpi-mp'] if mpich == True: pop = 'openmpi' elif mpich == False: pop = 'mpich' else: pop = 'THIS IS NOT THE MPI YOU ARE LOOKING FOR' progs = (i for i in progs if pop not in i) mpi = None for prog in progs: mpi = which(prog, ignore_errors=True) if mpi: break if mpi and not fullpath: mpi = os.path.split(mpi)[-1] return mpi def which_strategy(scatter=True, lazy=False, fullpath=True): """try to autodetect an available strategy (scatter or pool)""" target = 'ezscatter' if scatter else 'ezpool' import sys if (sys.platform[:3] == 'win'): lazy=False if lazy: target = "`which %s`" % target # lookup full path elif not lazy and fullpath: from pox import which target = which(target, ignore_errors=True) if not target: target = None #XXX: better None or "" ? 
return target def which_python(lazy=False, fullpath=True): "get an invocation for this python on the execution path" from pox import which_python # check if the versioned python is on the path py = which_python(lazy=False, version=True, fullpath=True) if not lazy and fullpath and py: return py import sys if (sys.platform[:3] == 'win'): lazy=False # if on the path, apply user's options return which_python(lazy=lazy, version=bool(py), fullpath=fullpath) # backward compatability from pox import wait_for if __name__=='__main__': n = 7 #12 pop = 12 #7 #XXX: note the two ways to calculate assert get_workload(0, n, pop) == balance_workload(n, pop, 0) assert [get_workload(i, n, pop) for i in range(n)] == \ zip(*balance_workload(n, pop)) assert [get_workload(i, n, pop) for i in range(0,n/2)] == \ zip(*balance_workload(n, pop, 0, n/2)) assert zip(*balance_workload(n,pop,0,n)) == zip(*balance_workload(n,pop)) assert zip(*balance_workload(n,pop,0,1)) == [balance_workload(n,pop,0)] assert get_workload(0,n,pop,skip=0) == balance_workload(n,pop,0,skip=0) assert get_workload(0,n,pop,skip=n) == balance_workload(n,pop,0,skip=n) assert get_workload(0,n,pop,skip=n+1) == balance_workload(n,pop,0,skip=n+1) assert [get_workload(i, n, pop, skip=0) for i in range(n)] == \ zip(*balance_workload(n, pop, skip=0)) assert [get_workload(i, n, pop, skip=n) for i in range(n)] == \ zip(*balance_workload(n, pop, skip=n)) # End of file uqfoundation-pyina-c629452/pyproject.toml000066400000000000000000000004641467660040300205520ustar00rootroot00000000000000[build-system] # Further build requirements come from setup.py via the PEP 517 interface requires = [ "setuptools>=42", "Cython>=0.29.30", #XXX: required to build numpy from source #"oldest-supported-numpy>=1.0", #XXX: oldest supported numpy with wheels ] build-backend = "setuptools.build_meta" 
uqfoundation-pyina-c629452/scripts/000077500000000000000000000000001467660040300173215ustar00rootroot00000000000000uqfoundation-pyina-c629452/scripts/ezpool000077500000000000000000000041351467660040300205620ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE """ helper script for ``pyina.mpi`` maps using the *'worker pool'* strategy Notes: this uses the same code as ``ezscatter``, but with ``pyina.mpi_pool``. Warning: this is a helper script for ``pyina.mpi.Mapper`` -- don't use it directly. """ import logging log = logging.getLogger("ezpool") log.addHandler(logging.StreamHandler()) def _debug(boolean): """print debug statements""" if boolean: log.setLevel(logging.DEBUG) else: log.setLevel(logging.WARN) return if __name__ == '__main__': from pyina.mpi_pool import parallel_map import dill as pickle import sys import os from pyina import mpi world = mpi.world funcname = sys.argv[1] argfilename = sys.argv[2] outfilename = sys.argv[3] if funcname.endswith('.pik'): # used pickled func workdir = None func = pickle.load(open(funcname,'rb')) else: # used tempfile for func workdir = sys.argv[4] sys.path = [workdir] + sys.path modname = os.path.splitext(os.path.basename(funcname))[0] module = __import__(modname) sys.path.pop(0) func = module.FUNC args,kwds = pickle.load(open(argfilename,'rb')) if world.rank == 0: log.info('funcname: %s' % funcname) # sys.argv[1] log.info('argfilename: %s' % argfilename) # sys.argv[2] log.info('outfilename: %s' % outfilename) # sys.argv[3] log.info('workdir: %s' % workdir) # sys.argv[4] log.info('func: %s' % func) log.info('args: %s' % str(args)) log.info('kwds: %s' % str(kwds)) res = parallel_map(func, *args, **kwds) #XXX: 
called on ALL nodes ? if world.rank == 0: log.info('res: %s' % str(res)) pickle.dump(res, open(outfilename,'wb')) # end of file uqfoundation-pyina-c629452/scripts/ezscatter000077500000000000000000000041461467660040300212600ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE """ helper script for ``pyina.mpi`` maps using the *'scatter gather'* strategy Notes: this uses the same code as ``ezpool``, but with ``pyina.mpi_scatter``. Warning: this is a helper script for ``pyina.mpi.Mapper`` -- don't use it directly. """ import logging log = logging.getLogger("ezscatter") log.addHandler(logging.StreamHandler()) def _debug(boolean): """print debug statements""" if boolean: log.setLevel(logging.DEBUG) else: log.setLevel(logging.WARN) return if __name__ == '__main__': from pyina.mpi_scatter import parallel_map import dill as pickle import sys import os from pyina import mpi world = mpi.world funcname = sys.argv[1] argfilename = sys.argv[2] outfilename = sys.argv[3] if funcname.endswith('.pik'): # used pickled func workdir = None func = pickle.load(open(funcname,'rb')) else: # used tempfile for func workdir = sys.argv[4] sys.path = [workdir] + sys.path modname = os.path.splitext(os.path.basename(funcname))[0] module = __import__(modname) sys.path.pop(0) func = module.FUNC args,kwds = pickle.load(open(argfilename,'rb')) if world.rank == 0: log.info('funcname: %s' % funcname) # sys.argv[1] log.info('argfilename: %s' % argfilename) # sys.argv[2] log.info('outfilename: %s' % outfilename) # sys.argv[3] log.info('workdir: %s' % workdir) # sys.argv[4] log.info('func: %s' % func) log.info('args: %s' % str(args)) log.info('kwds: %s' % str(kwds)) res = parallel_map(func, 
*args, **kwds) #XXX: called on ALL nodes ? if world.rank == 0: log.info('res: %s' % str(res)) pickle.dump(res, open(outfilename,'wb')) # end of file uqfoundation-pyina-c629452/scripts/mpi_world000077500000000000000000000032331467660040300212440ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE # # helper script to setup your mpi environment import pyina.__main__ from pyina.__main__ import * __doc__ = pyina.__main__.__doc__ if __name__=="__main__": import sys from pyina.launchers import MpiPool if sys.argv[-1] == "--kill": print("killing all...") kill_all() elif len(sys.argv) > 2: if sys.argv[1] == "--workers": print("seting up mpi...") MASTERINFO = set_master() nodes = sys.argv[2:] nodes = [node.strip('[()]').strip(',').strip() for node in nodes] #nodes = nodes.strip('[()]').split(',') set_workers(nodes,MASTERINFO) #elif sys.argv[1] == "--alias": # print "setting up mpi python..." 
# nodes = sys.argv[2:] # nodes = [node.strip('[()]').strip(',').strip() for node in nodes] # for node in nodes: # alias(int(node)) elif sys.argv[1] == "--fetch": nnodes = int(sys.argv[2]) try: pool = MpiPool() pool.nodes = nnodes hostnames = pool.map(host, range(nnodes)) print('\n'.join(hostnames)) except: # "--help" print(__doc__) else: # "--help" print(__doc__) else: # "--help" print(__doc__) # End of file uqfoundation-pyina-c629452/setup.cfg000066400000000000000000000001761467660040300174570ustar00rootroot00000000000000[egg_info] #tag_build = .dev0 [bdist_wheel] #python-tag = py3 #plat-name = manylinux_2_28_x86_64 [sdist] #formats=zip,gztar uqfoundation-pyina-c629452/setup.py000066400000000000000000000135111467660040300173450ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 1997-2016 California Institute of Technology. # Copyright (c) 2016-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE import os import sys # drop support for older python if sys.version_info < (3, 8): unsupported = 'Versions of Python before 3.8 are not supported' raise ValueError(unsupported) # get distribution meta info here = os.path.abspath(os.path.dirname(__file__)) sys.path.append(here) from version import (__version__, __author__, __contact__ as AUTHOR_EMAIL, get_license_text, get_readme_as_rst, write_info_file) LICENSE = get_license_text(os.path.join(here, 'LICENSE')) README = get_readme_as_rst(os.path.join(here, 'README.md')) # write meta info file write_info_file(here, 'pyina', doc=README, license=LICENSE, version=__version__, author=__author__) del here, get_license_text, get_readme_as_rst, write_info_file # check if setuptools is available try: from setuptools import setup from setuptools.dist import Distribution has_setuptools = True except ImportError: from distutils.core import setup Distribution = object has_setuptools = False # platform-specific instructions sdkroot_set = False if sys.platform[:3] == 'win': pass else: #platform = linux or mac if sys.platform[:6] == 'darwin': # mpi4py has difficulty building on a Mac # see special installation instructions here: # http://mpi4py.scipy.org/docs/usrman/install.html import os try: sdkroot = os.environ['SDKROOT'] except KeyError: sdkroot = '/' os.environ['SDKROOT'] = sdkroot sdkroot_set = True pass pass # build the 'setup' call setup_kwds = dict( name="pyina", version=__version__, description="MPI parallel map and cluster scheduling", long_description = README.strip(), author = __author__, author_email = AUTHOR_EMAIL, maintainer = __author__, maintainer_email = AUTHOR_EMAIL, license = 'BSD-3-Clause', platforms = ['Linux', 'Mac'], url = 'https://github.com/uqfoundation/pyina', download_url = 'https://pypi.org/project/pyina/#files', project_urls = { 'Documentation':'http://pyina.rtfd.io', 'Source 
Code':'https://github.com/uqfoundation/pyina', 'Bug Tracker':'https://github.com/uqfoundation/pyina/issues', }, python_requires = '>=3.8', classifiers = [ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Intended Audience :: Science/Research', 'License :: OSI Approved :: BSD License', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.8', 'Programming Language :: Python :: 3.9', 'Programming Language :: Python :: 3.10', 'Programming Language :: Python :: 3.11', 'Programming Language :: Python :: 3.12', 'Programming Language :: Python :: 3.13', 'Programming Language :: Python :: Implementation :: CPython', 'Programming Language :: Python :: Implementation :: PyPy', 'Topic :: Scientific/Engineering', 'Topic :: Software Development', ], packages=['pyina','pyina.tests'], package_dir={'pyina':'pyina','pyina.tests':'pyina/tests'}, scripts=['scripts/ezpool','scripts/ezscatter','scripts/mpi_world'], ) # force python-, abi-, and platform-specific naming of bdist_wheel class BinaryDistribution(Distribution): """Distribution which forces a binary package with platform name""" def has_ext_modules(foo): return True # define dependencies dill_version = 'dill>=0.3.9' pox_version = 'pox>=0.3.5' pathos_version = 'pathos>=0.3.3' mystic_version = 'mystic>=0.4.2' cython_version = 'cython>=0.29.30' #XXX: required to build numpy from source numpy_version = 'numpy>=1.0' mpi4py_version = 'mpi4py>=1.3, !=3.0.2' # segfault 11 on MPI import # add dependencies depend = [numpy_version, dill_version, pox_version, pathos_version, mpi4py_version] extras = {'examples': [mystic_version]} # rtd fails for mpi4py, so mock it instead if os.environ.get('READTHEDOCS', None) == 'True': #NOTE: is on_rtd depend = depend[:-1] # update setup kwds if has_setuptools: setup_kwds.update( zip_safe=False, # distclass=BinaryDistribution, install_requires=depend, # extras_require=extras, ) # call setup setup(**setup_kwds) # if dependencies are missing, print 
a warning try: import numpy import dill import pox import pathos import mpi4py #XXX: throws an error even though ok? #import cython #import mystic except ImportError: print("\n***********************************************************") print("WARNING: One of the following dependencies may be unresolved:") print(" %s" % numpy_version) print(" %s" % dill_version) print(" %s" % pox_version) print(" %s" % pathos_version) print(" %s" % mpi4py_version) #print(" %s" % cython_version) #print(" %s (optional)" % mystic_version) print("***********************************************************\n") if sdkroot_set: print("\n***********************************************************") print("WARNING: One of following variables was set to a default:") print(" SDKROOT %s" % sdkroot) print("***********************************************************\n") else: pass try: import mpi4py except ImportError: print(""" You may need to set the environment variable "SDKROOT", as shown in the instructions for installing ``mpi4py``: http://mpi4py.scipy.org/docs/usrman/install.html """) if __name__=='__main__': pass # End of file uqfoundation-pyina-c629452/tox.ini000066400000000000000000000005061467660040300171460ustar00rootroot00000000000000[tox] skip_missing_interpreters= True envlist = py38 py39 py310 py311 py312 py313 pypy38 pypy39 pypy310 [testenv] deps = numpy mpi4py dill pox pathos whitelist_externals = # bash commands = {envpython} -m pip install . {envpython} pyina/tests/__main__.py uqfoundation-pyina-c629452/version.py000066400000000000000000000061511467660040300176740ustar00rootroot00000000000000#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2022-2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/pyina/blob/master/LICENSE __version__ = '0.3.0'#.dev0' __author__ = 'Mike McKerns' __contact__ = 'mmckerns@uqfoundation.org' def get_license_text(filepath): "open the LICENSE file and read the contents" try: LICENSE = open(filepath).read() except: LICENSE = '' return LICENSE def get_readme_as_rst(filepath): "open the README file and read the markdown as rst" try: fh = open(filepath) name, null = fh.readline().rstrip(), fh.readline() tag, null = fh.readline(), fh.readline() tag = "%s: %s" % (name, tag) split = '-'*(len(tag)-1)+'\n' README = ''.join((null,split,tag,split,'\n')) skip = False for line in fh: if line.startswith('['): continue elif skip and line.startswith(' http'): README += '\n' + line elif line.startswith('* '): README += line.replace('* ',' - ',1) elif line.startswith('-'): README += line.replace('-','=') + '\n' elif line.startswith('!['): # image alt,img = line.split('](',1) if img.startswith('docs'): # relative path img = img.split('docs/source/',1)[-1] # make is in docs README += '.. image:: ' + img.replace(')','') README += ' :alt: ' + alt.replace('![','') + '\n' #elif ')[http' in line: # alt text link (`text `_) else: README += line skip = line.endswith(':\n') fh.close() except: README = '' return README def write_info_file(dirpath, modulename, **info): """write the given info to 'modulename/__info__.py' info expects: doc: the module's long_description version: the module's version string author: the module's author string license: the module's license contents """ import os infofile = os.path.join(dirpath, '%s/__info__.py' % modulename) header = '''#!/usr/bin/env python # # Author: Mike McKerns (mmckerns @caltech and @uqfoundation) # Copyright (c) 2024 The Uncertainty Quantification Foundation. # License: 3-clause BSD. 
The full license text is available at: # - https://github.com/uqfoundation/%s/blob/master/LICENSE ''' % modulename #XXX: author and email are hardwired in the header doc = info.get('doc', None) version = info.get('version', None) author = info.get('author', None) license = info.get('license', None) with open(infofile, 'w') as fh: fh.write(header) if doc is not None: fh.write("'''%s'''\n\n" % doc) if version is not None: fh.write("__version__ = %r\n" % version) if author is not None: fh.write("__author__ = %r\n\n" % author) if license is not None: fh.write("__license__ = '''\n%s'''\n" % license) return